* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <asm/div64.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
+#include <asm/uaccess.h>
#else
#include <liblustre.h>
#endif
RETURN(rc);
}
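+/* Return the number of items in @hur whose fid is served by @tgt_mds,
+ * i.e. the item count of the per-target sub-request that
+ * lmv_hsm_req_build() will assemble for that MDT. */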
+static int lmv_hsm_req_count(struct lmv_obd *lmv,
+ const struct hsm_user_request *hur,
+ const struct lmv_tgt_desc *tgt_mds)
+{
+ int i, nr = 0;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* count how many requests must be sent to the given target */
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+		curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
+		/* skip fids whose target cannot be resolved */
+		if (IS_ERR(curr_tgt))
+			continue;
+		if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
+			nr++;
+ }
+ return nr;
+}
+
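+/* Copy into @hur_out only the items of @hur_in that are served by
+ * @tgt_mds.  @hur_out must have room for the item count returned by
+ * lmv_hsm_req_count() for the same target; the opaque data payload is
+ * copied verbatim after the items. */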
+static void lmv_hsm_req_build(struct lmv_obd *lmv,
+ struct hsm_user_request *hur_in,
+ const struct lmv_tgt_desc *tgt_mds,
+ struct hsm_user_request *hur_out)
+{
+ int i, nr_out;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* build the hsm_user_request for the given target */
+ hur_out->hur_request = hur_in->hur_request;
+ nr_out = 0;
+ for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
+		curr_tgt = lmv_find_target(lmv,
+					   &hur_in->hur_user_item[i].hui_fid);
+		/* mirror the skip in lmv_hsm_req_count() */
+		if (IS_ERR(curr_tgt))
+			continue;
+		if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
+ hur_out->hur_user_item[nr_out] =
+ hur_in->hur_user_item[i];
+ nr_out++;
+ }
+ }
+ hur_out->hur_request.hr_itemcount = nr_out;
+ memcpy(hur_data(hur_out), hur_data(hur_in),
+ hur_in->hur_request.hr_data_len);
+}
+
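+/* Tear down the copytool registration on every MDT, then remove the
+ * copytool from its kuc group whatever the per-target results were. */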
+static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ int i, rc = 0;
+ ENTRY;
+
+	/* unregister request (called from llapi_hsm_copytool_fini) */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ /* best effort: try to clean as much as possible
+ * (continue on error) */
+ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ }
+
+	/* Whatever the result, remove the copytool from its kuc group.
+	 * Unreachable coordinators will get EPIPE on their next request
+	 * and will unregister automatically.
+	 */
+ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
+ RETURN(rc);
+}
+
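+/* Register the copytool on every active MDT and attach its write pipe
+ * to a kuc group.  Registration is all-or-nothing across the active
+ * targets; missing (inactive) targets are picked up when they return. */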
+static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ struct file *filp;
+ int i, j, err;
+ int rc = 0;
+ bool any_set = false;
+ ENTRY;
+
+	/* All or nothing: try to register with every MDS.
+	 * On failure, unregister from the MDSs already registered,
+	 * unless the failure is due to an inactive target. */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ len, lk, uarg);
+ if (err) {
+ if (lmv->tgts[i]->ltd_active) {
+ /* permanent error */
+				CERROR("error: iocontrol MDC %s on MDT "
+				       "idx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
+ rc = err;
+ lk->lk_flags |= LK_FLG_STOP;
+ /* unregister from previous MDS */
+ for (j = 0; j < i; j++)
+ obd_iocontrol(cmd,
+ lmv->tgts[j]->ltd_exp,
+ len, lk, uarg);
+ RETURN(rc);
+ }
+			/* else: transient error.
+			 * kuc will register with the missing MDT
+			 * when it comes back */
+ } else {
+ any_set = true;
+ }
+ }
+
+ if (!any_set)
+ /* no registration done: return error */
+ RETURN(-ENOTCONN);
+
+ /* at least one registration done, with no failure */
+ filp = fget(lk->lk_wfd);
+	if (filp == NULL)
+		RETURN(-EBADF);
+
+	rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
+	/* filp is known to be non-NULL here, so only rc needs checking */
+	if (rc != 0)
+		fput(filp);
+ RETURN(rc);
+}
+
static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int len, void *karg, void *uarg)
{
RETURN(-EINVAL);
/* copy UUID */
- if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
min((int) data->ioc_plen2,
(int) sizeof(struct obd_uuid))))
RETURN(-EFAULT);
0);
if (rc)
RETURN(rc);
- if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
RETURN(-EFAULT);
}
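+	/* The following HSM ops concern a single fid: resolve the MDT
+	 * owning it and forward the ioctl there. */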
case LL_IOC_HSM_STATE_GET:
case LL_IOC_HSM_STATE_SET:
- case LL_IOC_HSM_ACTION:
+ case LL_IOC_HSM_ACTION: {
+ struct md_op_data *op_data = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+
+ if (tgt->ltd_exp == NULL)
+ RETURN(-EINVAL);
+
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_PROGRESS: {
+ const struct hsm_progress_kernel *hpk = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &hpk->hpk_fid);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_REQUEST: {
+ struct hsm_user_request *hur = karg;
+ struct lmv_tgt_desc *tgt;
+ unsigned int reqcount = hur->hur_request.hr_itemcount;
+
+ if (reqcount == 0)
+ RETURN(0);
+
+ /* if the request is about a single fid
+ * or if there is a single MDS, no need to split
+ * the request. */
+ if (reqcount == 1 || count == 1) {
+ tgt = lmv_find_target(lmv,
+ &hur->hur_user_item[0].hui_fid);
+ if (IS_ERR(tgt))
+ RETURN(PTR_ERR(tgt));
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ } else {
+ /* split fid list to their respective MDS */
+ for (i = 0; i < count; i++) {
+ unsigned int nr, reqlen;
+ int rc1;
+ struct hsm_user_request *req;
+
+ nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ if (nr == 0) /* nothing for this MDS */
+ continue;
+
+ /* build a request with fids for this MDS */
+ reqlen = offsetof(typeof(*hur),
+ hur_user_item[nr])
+ + hur->hur_request.hr_data_len;
+ OBD_ALLOC_LARGE(req, reqlen);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+
+ rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ reqlen, req, uarg);
+ if (rc1 != 0 && rc == 0)
+ rc = rc1;
+ OBD_FREE_LARGE(req, reqlen);
+ }
+ }
+ break;
+ }
case LL_IOC_LOV_SWAP_LAYOUTS: {
struct md_op_data *op_data = karg;
struct lmv_tgt_desc *tgt1, *tgt2;
rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
break;
}
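+	/* copytool start/stop: LK_FLG_STOP selects the unregister path,
+	 * otherwise this is a registration */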
+ case LL_IOC_HSM_CT_START: {
+		struct lustre_kernelcomm *lk = karg;
+
+		if (lk->lk_flags & LK_FLG_STOP)
+ rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
+ else
+ rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
+ break;
+ }
default:
for (i = 0; i < count; i++) {
struct obd_device *mdc_obd;
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (CFS_PAGE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if CFS_PAGE_SIZE > LU_PAGE_SIZE
+#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
for (i = 0; i < ncfspgs; i++) {
- struct lu_dirpage *dp = cfs_kmap(pages[i]);
+ struct lu_dirpage *dp = kmap(pages[i]);
struct lu_dirpage *first = dp;
struct lu_dirent *end_dirent = NULL;
struct lu_dirent *ent;
first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
- cfs_kunmap(pages[i]);
+ kunmap(pages[i]);
}
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* CFS_PAGE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
- int ncfspgs; /* pages read in CFS_PAGE_SIZE */
+ int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
ENTRY;
if (rc != 0)
RETURN(rc);
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
- >> CFS_PAGE_SHIFT;
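+	/* round the bytes actually transferred up to whole VM pages */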
+ ncfspgs = ((*request)->rq_bulk->bd_nob_transferred +
+ PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);