* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
if (lsm) {
LASSERT(lsm->lsm_object_id);
- LASSERT_MDS_GROUP(lsm->lsm_object_gr);
+ LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
(*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
- (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
+ (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
}
RETURN(lmm_size);
if (lmm != NULL) {
/* XXX zero *lsmp? */
(*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
- (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
+ (*lsmp)->lsm_object_seq = le64_to_cpu (lmm->lmm_object_seq);
LASSERT((*lsmp)->lsm_object_id);
- LASSERT_MDS_GROUP((*lsmp)->lsm_object_gr);
+ LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
}
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
static int osc_setattr_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
+ struct osc_setattr_args *sa, int rc)
{
struct ost_body *body;
ENTRY;
if (body == NULL)
GOTO(out, rc = -EPROTO);
- lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
+ lustre_get_wire_obdo(sa->sa_oa, &body->oa);
out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ rc = sa->sa_upcall(sa->sa_cookie, rc);
RETURN(rc);
}
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
+int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
{
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
+ struct ptlrpc_request *req;
+ struct osc_setattr_args *sa;
+ int rc;
ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
RETURN(rc);
}
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
osc_pack_req_body(req, oinfo);
req->rq_interpret_reply =
(ptlrpc_interpterer_t)osc_setattr_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
+ CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
+ sa = ptlrpc_req_async_args(req);
+ sa->sa_oa = oinfo->oi_oa;
+ sa->sa_upcall = upcall;
+ sa->sa_cookie = cookie;
- ptlrpc_set_add_req(rqset, req);
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
+ else
+ ptlrpc_set_add_req(rqset, req);
}
RETURN(0);
}
+/* Compatibility wrapper around osc_setattr_async_base(): issues an
+ * asynchronous setattr using the obd_info's own completion callback
+ * (oi_cb_up) as the upcall and the obd_info itself as the cookie. */
+static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ return osc_setattr_async_base(exp, oinfo, oti,
+ oinfo->oi_cb_up, oinfo, rqset);
+}
+
int osc_real_create(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
* This needs to be fixed in a big way.
*/
lsm->lsm_object_id = oa->o_id;
- lsm->lsm_object_gr = oa->o_gr;
+ lsm->lsm_object_seq = oa->o_seq;
*ea = lsm;
if (oti != NULL) {
RETURN(rc);
}
-static int osc_punch_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_punch_args *aa, int rc)
-{
- struct ost_body *body;
- ENTRY;
-
- if (rc != 0)
- GOTO(out, rc);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
-
- lustre_get_wire_obdo(aa->pa_oa, &body->oa);
-out:
- rc = aa->pa_upcall(aa->pa_cookie, rc);
- RETURN(rc);
-}
-
-int osc_punch_base(struct obd_export *exp, struct obdo *oa,
- struct obd_capa *capa,
+int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset)
{
- struct ptlrpc_request *req;
- struct osc_punch_args *aa;
- struct ost_body *body;
- int rc;
+ struct ptlrpc_request *req;
+ struct osc_setattr_args *sa;
+ struct ost_body *body;
+ int rc;
ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
if (req == NULL)
RETURN(-ENOMEM);
- osc_set_capa_size(req, &RMF_CAPA1, capa);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
if (rc) {
ptlrpc_request_free(req);
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- lustre_set_wire_obdo(&body->oa, oa);
- osc_pack_capa(req, body, capa);
+ lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_punch_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->pa_oa = oa;
- aa->pa_upcall = upcall;
- aa->pa_cookie = cookie;
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+ CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
+ sa = ptlrpc_req_async_args(req);
+ sa->sa_oa = oinfo->oi_oa;
+ sa->sa_upcall = upcall;
+ sa->sa_cookie = cookie;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req, PSCOPE_OTHER);
else
oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
- return osc_punch_base(exp, oinfo->oi_oa, oinfo->oi_capa,
+ return osc_punch_base(exp, oinfo,
oinfo->oi_cb_up, oinfo, rqset);
}
int count;
ENTRY;
- osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+ osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
RETURN(0);
{
cfs_time_t time = cfs_time_current();
cfs_time_t next_shrink = client->cl_next_shrink_grant;
+
+ if ((client->cl_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_GRANT_SHRINK) == 0)
+ return 0;
+
if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > GRANT_SHRINK_LIMIT)
cli->cl_avail_grant = ocd->ocd_grant;
else
cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+
+ if (cli->cl_avail_grant < 0) {
+ CWARN("%s: available grant < 0, the OSS is probably not running"
+ " with patch from bug20278 (%ld) \n",
+ cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
+ /* workaround for 1.6 servers which do not have
+ * the patch from bug20278 */
+ cli->cl_avail_grant = ocd->ocd_grant;
+ }
+
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+ CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+ cli->cl_import->imp_obd->obd_name,
cli->cl_avail_grant, cli->cl_lost_grant);
- LASSERT(cli->cl_avail_grant >= 0);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
cfs_list_empty(&cli->cl_grant_shrink_list))
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT|
- OBD_BRW_NOCACHE|OBD_BRW_SYNC);
+ OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine */
return 0;
}
+ /* If this is an mmapped file, it can be changed at any time. */
+ if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP)
+ return 1;
+
if (oa->o_valid & OBD_MD_FLFLAGS)
cksum_type = cksum_type_unpack(oa->o_flags);
else
msg = "changed in transit AND doesn't match the original - "
"likely false positive due to mmap IO (bug 11742)";
- LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inum "
- LPU64"/"LPU64" object "LPU64"/"LPU64" extent "
- "["LPU64"-"LPU64"]\n",
+ LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
+ " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
msg, libcfs_nid2str(peer->nid),
- oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
- (__u64)0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
oa->o_id,
- oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
+ oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
pga[0]->off,
pga[page_count-1]->off + pga[page_count-1]->count - 1);
CERROR("original client csum %x (type %x), server csum %x (type %x), "
__u32 client_cksum = 0;
ENTRY;
- if (rc < 0 && rc != -EDQUOT)
+ if (rc < 0 && rc != -EDQUOT) {
+ DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
RETURN(rc);
+ }
LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
if (body == NULL) {
- CDEBUG(D_INFO, "Can't unpack body\n");
+ DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
RETURN(-EPROTO);
}
+#ifdef HAVE_QUOTA_SUPPORT
/* set/clear over quota flag for a uid/gid */
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
+ CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
+ body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
+ body->oa.o_flags);
lquota_setdq(quota_interface, cli, qid, body->oa.o_valid,
body->oa.o_flags);
}
+#endif
+
+ osc_update_grant(cli, body);
if (rc < 0)
RETURN(rc);
if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
client_cksum = aa->aa_oa->o_cksum; /* save for later */
- osc_update_grant(cli, body);
-
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
if (rc > 0) {
CERROR("Unexpected +ve rc %d\n", rc);
libcfs_nid2str(peer->nid));
} else if (server_cksum != client_cksum) {
LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
- "%s%s%s inum "LPU64"/"LPU64" object "
+ "%s%s%s inode "DFID" object "
LPU64"/"LPU64" extent "
"["LPU64"-"LPU64"]\n",
req->rq_import->imp_obd->obd_name,
libcfs_nid2str(peer->nid),
via, router,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_fid : (__u64)0,
+ body->oa.o_parent_seq : (__u64)0,
+ body->oa.o_valid & OBD_MD_FLFID ?
+ body->oa.o_parent_oid : 0,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_generation :(__u64)0,
+ body->oa.o_parent_ver : 0,
body->oa.o_id,
body->oa.o_valid & OBD_MD_FLGROUP ?
- body->oa.o_gr : (__u64)0,
+ body->oa.o_seq : (__u64)0,
aa->aa_ppga[0]->off,
aa->aa_ppga[aa->aa_page_count-1]->off +
aa->aa_ppga[aa->aa_page_count-1]->count -
ENTRY;
if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
- CERROR("too many resend retries, returning error\n");
+ CERROR("too many resent retries, returning error\n");
RETURN(-EIO);
}
if (cmd & OBD_BRW_WRITE) {
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
- * create more pages to coallesce with what's waiting.. */
+ * create more pages to coalesce with what's waiting.. */
if (!cfs_list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
if (osc_recoverable_error(rc)) {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
+ /* Only retry once for mmapped files since the mmapped page
+ * might be modified at any time. We have to retry at least
+ * once in case there WAS really a corruption of the page
+ * on the network, that was not caused by mmap() modifying
+ * the page. Bug11742 */
+ if ((rc == -EAGAIN) && (aa->aa_resends > 0) &&
+ aa->aa_oa->o_valid & OBD_MD_FLFLAGS &&
+ aa->aa_oa->o_flags & OBD_FL_MMAP) {
+ rc = 0;
+ } else {
+ rc = osc_brw_redo_request(req, aa);
+ if (rc == 0)
+ RETURN(0);
+ }
}
if (aa->aa_ocapa) {
}
OBDO_FREE(aa->aa_oa);
} else { /* from async_internal() */
- int i;
+ obd_count i;
for (i = 0; i < aa->aa_page_count; i++)
osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
if (!async)
cl_req_completion(env, aa->aa_clerq, rc);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
+
RETURN(rc);
}
enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
struct ldlm_lock *lock = NULL;
struct cl_req_attr crattr;
- int i, rc;
+ int i, rc, mpflag = 0;
ENTRY;
LASSERT(!cfs_list_empty(rpc_list));
+ if (cmd & OBD_BRW_MEMALLOC)
+ mpflag = cfs_memory_pressure_get_and_set();
+
memset(&crattr, 0, sizeof crattr);
OBD_ALLOC(pga, sizeof(*pga) * page_count);
if (pga == NULL)
GOTO(out, req = ERR_PTR(rc));
}
+ if (cmd & OBD_BRW_MEMALLOC)
+ req->rq_memalloc = 1;
+
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
* later setattr before earlier BRW (as determined by the request xid),
CFS_INIT_LIST_HEAD(rpc_list);
aa->aa_clerq = clerq;
out:
+ if (cmd & OBD_BRW_MEMALLOC)
+ cfs_memory_pressure_restore(mpflag);
+
capa_put(crattr.cra_capa);
if (IS_ERR(req)) {
if (oa)
* \param cmd OBD_BRW_* macroses
* \param lop pending pages
*
- * \return zero if pages successfully add to send queue.
- * \return not zere if error occurring.
+ * \return zero if no pages were added to the send queue.
+ * \return 1 if pages were successfully added to the send queue.
+ * \return a negative value on error.
*/
static int
osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
CFS_LIST_HEAD(tmp_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
- int srvlock = 0;
+ int srvlock = 0, mem_tight = 0;
struct cl_object *clob = NULL;
ENTRY;
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
* as the call returns. if we race with commit_write giving
- * us that page we dont' want to create a hole in the page
+ * us that page we don't want to create a hole in the page
* stream, so we stop and leave the rpc to be fired by
* another dirtier or kupdated interval (the not ready page
* will still be on the dirty list). we could call in
/* now put the page back in our accounting */
cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
+ mem_tight = 1;
if (page_count == 0)
srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
if (++page_count >= cli->cl_max_pages_per_rpc)
RETURN(0);
}
- req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
+ req = osc_build_req(env, cli, &rpc_list, page_count,
+ mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
if (IS_ERR(req)) {
LASSERT(cfs_list_empty(&rpc_list));
loi_list_maint(cli, loi);
race_counter++;
}
- /* attempt some inter-object balancing by issueing rpcs
+ /* attempt some inter-object balancing by issuing rpcs
* for each object in turn */
if (!cfs_list_empty(&loi->loi_hp_ready_item))
cfs_list_del_init(&loi->loi_hp_ready_item);
osc_enter_cache_try(env, cli, loi, oap, 0))
RETURN(0);
- /* Make sure that there are write rpcs in flight to wait for. This
- * is a little silly as this object may not have any pending but
- * other objects sure might. */
- if (cli->cl_w_in_flight) {
+ /* It is safe to block as a cache waiter as long as there is grant
+ * space available or the hope of additional grant being returned
+ * when an in flight write completes. Using the write back cache
+ * if possible is preferable to sending the data synchronously
+ * because write pages can then be merged in to large requests.
+ * The addition of this cache waiter will cause pending write
+ * pages to be sent immediately. */
+ if (cli->cl_w_in_flight || cli->cl_avail_grant >= CFS_PAGE_SIZE) {
cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
cfs_waitq_init(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
/* Give a hint to OST that requests are coming from kswapd - bug19529 */
- if (libcfs_memory_pressure_get())
+ if (cfs_memory_pressure_get())
oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = async_flags;
struct ldlm_res_id res_id;
struct obd_device *obd = class_exp2obd(exp);
- osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id);
+ osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
return 0;
}
+/* Look up any LDLM lock for the inode's object in this OSC by iterating
+ * the object's LDLM resource with the given iterator callback.
+ *
+ * Returns:  0 if no matching lock was found,
+ *           1 if a matching lock was found (iterator returned STOP),
+ *         < 0 on error from ldlm_resource_iterate(). */
+static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
+ ldlm_iterator_t replace, void *data)
+{
+ struct ldlm_res_id res_id;
+ struct obd_device *obd = class_exp2obd(exp);
+ int rc = 0;
+
+ osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
+ rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
+ if (rc == LDLM_ITER_STOP)
+ return(1);
+ if (rc == LDLM_ITER_CONTINUE)
+ return(0);
+ return(rc);
+}
+
static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
obd_enqueue_update_f upcall, void *cookie,
int *flags, int rc)
* osc_enqueue_fini(). */
ldlm_lock_addref(&handle, mode);
+ /* Let the CP AST grant the lock first. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+
/* Complete obtaining the lock procedure. */
rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
mode, aa->oa_flags, aa->oa_lvb,
ENTRY;
osc_build_res_name(oinfo->oi_md->lsm_object_id,
- oinfo->oi_md->lsm_object_gr, &res_id);
+ oinfo->oi_md->lsm_object_seq, &res_id);
rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
&oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
}
static int osc_cancel_unused(struct obd_export *exp,
- struct lov_stripe_md *lsm, int flags,
+ struct lov_stripe_md *lsm,
+ ldlm_cancel_flags_t flags,
void *opaque)
{
struct obd_device *obd = class_exp2obd(exp);
if (lsm != NULL) {
resp = osc_build_res_name(lsm->lsm_object_id,
- lsm->lsm_object_gr, &res_id);
+ lsm->lsm_object_seq, &res_id);
}
return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
}
lumk->lmm_object_id = lsm->lsm_object_id;
- lumk->lmm_object_gr = lsm->lsm_object_gr;
+ lumk->lmm_object_seq = lsm->lsm_object_seq;
lumk->lmm_stripe_count = 1;
if (cfs_copy_to_user(lump, lumk, lum_size))
RETURN(0);
}
- if (KEY_IS(KEY_INIT_RECOV)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_initial_recov = *(int *)val;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
- exp->exp_obd->obd_name,
- imp->imp_initial_recov);
- RETURN(0);
- }
-
if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
if (KEY_IS(KEY_MDS_CONN)) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
- oscc->oscc_oa.o_gr = (*(__u32 *)val);
+ oscc->oscc_oa.o_seq = (*(__u32 *)val);
oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
- LASSERT_MDS_GROUP(oscc->oscc_oa.o_gr);
+ LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
req->rq_no_delay = req->rq_no_resend = 1;
req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
} else if (KEY_IS(KEY_GRANT_SHRINK)) {
CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
obd->obd_name, *index, catid.lci_logid.lgl_oid,
- catid.lci_logid.lgl_ogr, catid.lci_logid.lgl_ogen);
+ catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
rc = __osc_llog_init(obd, olg, disk_obd, &catid);
if (rc) {
.o_sync = osc_sync,
.o_enqueue = osc_enqueue,
.o_change_cbdata = osc_change_cbdata,
+ .o_find_cbdata = osc_find_cbdata,
.o_cancel = osc_cancel,
.o_cancel_unused = osc_cancel_unused,
.o_iocontrol = osc_iocontrol,