* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
struct lov_mds_md *lmm, int lmm_bytes)
{
int lsm_size;
+ struct obd_import *imp = class_exp2cliimp(exp);
ENTRY;
if (lmm != NULL) {
LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
}
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
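+ /* use the per-object size limit advertised by the server at connect
+ * time when OBD_CONNECT_MAXBYTES was negotiated; otherwise fall back
+ * to the static default */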
+ if (imp != NULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
+ (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
+ else
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
RETURN(lsm_size);
}
oinfo->oi_cb_up, oinfo, rqset);
}
-static int osc_sync(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *md, obd_size start, obd_size end,
- void *capa)
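+/* Reply callback for the now-asynchronous OST_SYNC request: unpack the
+ * ost_body from the reply and hand the result to the caller through
+ * the oi_cb_up completion callback. */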
+static int osc_sync_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *arg, int rc)
+{
+ struct osc_async_args *aa = arg;
+ struct ost_body *body;
+ ENTRY;
+
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL) {
+ CERROR("can't unpack ost_body\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ *aa->aa_oi->oi_oa = body->oa;
+out:
+ rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ RETURN(rc);
+}
+
+static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
+ obd_size start, obd_size end,
+ struct ptlrpc_request_set *set)
{
struct ptlrpc_request *req;
struct ost_body *body;
+ struct osc_async_args *aa;
int rc;
ENTRY;
- if (!oa) {
+ if (!oinfo->oi_oa) {
CDEBUG(D_INFO, "oa NULL\n");
RETURN(-EINVAL);
}
if (req == NULL)
RETURN(-ENOMEM);
- osc_set_capa_size(req, &RMF_CAPA1, capa);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
if (rc) {
ptlrpc_request_free(req);
/* overload the size and blocks fields in the oa with start/end */
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- lustre_set_wire_obdo(&body->oa, oa);
+ lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
body->oa.o_size = start;
body->oa.o_blocks = end;
body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
- osc_pack_capa(req, body, capa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = osc_sync_interpret;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- GOTO(out, rc);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
-
- lustre_get_wire_obdo(oa, &body->oa);
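+ /* stash the obd_info in the request's async args so that
+ * osc_sync_interpret() can retrieve it when the reply arrives */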
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oi = oinfo;
- EXIT;
- out:
- ptlrpc_req_finished(req);
- return rc;
+ ptlrpc_set_add_req(set, req);
+ RETURN(0);
}
/* Find and locally cancel locks matched by @mode in the resource found by
LASSERT(body);
osc_update_grant(cli, body);
out:
- OBD_FREE_PTR(oa);
+ OBDO_FREE(oa);
return rc;
}
oa->o_grant = cli->cl_avail_grant / 4;
cli->cl_avail_grant -= oa->o_grant;
client_obd_list_unlock(&cli->cl_loi_list_lock);
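+ /* o_flags is only valid when OBD_MD_FLFLAGS is set, so initialize
+ * it before OR-ing in OBD_FL_SHRINK_GRANT below */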
+ if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ oa->o_flags = 0;
+ }
oa->o_flags |= OBD_FL_SHRINK_GRANT;
osc_update_next_shrink(cli);
}
body->oa.o_grant = cli->cl_avail_grant - target;
cli->cl_avail_grant = target;
client_obd_list_unlock(&cli->cl_loi_list_lock);
+ if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
osc_update_next_shrink(cli);
cli->cl_avail_grant = ocd->ocd_grant;
else
cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+
+ if (cli->cl_avail_grant < 0) {
+ CWARN("%s: available grant < 0, the OSS is probably not running"
+ " with patch from bug20278 (%ld) \n",
+ cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
+ /* workaround for 1.6 servers which do not have
+ * the patch from bug20278 */
+ cli->cl_avail_grant = ocd->ocd_grant;
+ }
+
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+ CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+ cli->cl_import->imp_obd->obd_name,
cli->cl_avail_grant, cli->cl_lost_grant);
- LASSERT(cli->cl_avail_grant >= 0);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
cfs_list_empty(&cli->cl_grant_shrink_list))
/* return error if any niobuf was in error */
for (i = 0; i < niocount; i++) {
- if (remote_rcs[i] < 0)
+ if ((int)remote_rcs[i] < 0)
return(remote_rcs[i]);
if (remote_rcs[i] != 0) {
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT|
- OBD_BRW_NOCACHE|OBD_BRW_SYNC);
+ OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine */
pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
+ int poff = pg->off & ~CFS_PAGE_MASK;
LASSERT(pg->count > 0);
- LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
- "i: %d pg: %p off: "LPU64", count: %u\n", i, pg,
- pg->off, pg->count);
+ /* make sure there is no gap in the middle of the page array */
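+ /* i.e. only the first page may start at a non-zero offset and
+ * only the last page may end short of CFS_PAGE_SIZE; every
+ * interior page must cover exactly one full page */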
+ LASSERTF(page_count == 1 ||
+ (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
+ ergo(i > 0 && i < page_count - 1,
+ poff == 0 && pg->count == CFS_PAGE_SIZE) &&
+ ergo(i == page_count - 1, poff == 0)),
+ "i: %d/%d pg: %p off: "LPU64", count: %u\n",
+ i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
LASSERTF(i == 0 || pg->off > pg_prev->off,
"i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
- ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~CFS_PAGE_MASK,
- pg->count);
+ ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
&RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
- if (resend)
- body->oa.o_valid |= OBD_FL_RECOV_RESEND;
+ if (resend) {
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= OBD_FL_RECOV_RESEND;
+ }
if (osc_should_shrink_grant(cli))
osc_shrink_grant_local(cli, &body->oa);
return 0;
}
+ /* If this is an mmapped file, it can be changed at any time */
+ if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP)
+ return 1;
+
if (oa->o_valid & OBD_MD_FLFLAGS)
cksum_type = cksum_type_unpack(oa->o_flags);
else
if (server_cksum == ~0 && rc > 0) {
CERROR("Protocol error: server %s set the 'checksum' "
"bit, but didn't send a checksum. Not fatal, "
- "but please notify on http://bugzilla.lustre.org/\n",
+ "but please notify on http://bugs.whamcloud.com/\n",
libcfs_nid2str(peer->nid));
} else if (server_cksum != client_cksum) {
LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
ptlrpc_req_finished(req);
if (osc_recoverable_error(rc)) {
resends++;
- if (!osc_should_resend(resends, &exp->exp_obd->u.cli)) {
+ if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
CERROR("too many resend retries, returning error\n");
RETURN(-EIO);
}
int rc = 0;
ENTRY;
- if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
+ if (!client_should_resend(aa->aa_resends, aa->aa_cli)) {
CERROR("too many resent retries, returning error\n");
RETURN(-EIO);
}
if (cmd & OBD_BRW_WRITE) {
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
- * create more pages to coallesce with what's waiting.. */
+ * create more pages to coalesce with what's waiting. */
if (!cfs_list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
if (osc_recoverable_error(rc)) {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
+ /* Only retry once for mmapped files since the mmapped page
+ * might be modified at any time. We have to retry at least
+ * once in case there WAS really a corruption of the page
+ * on the network, that was not caused by mmap() modifying
+ * the page. Bug11742 */
+ if ((rc == -EAGAIN) && (aa->aa_resends > 0) &&
+ aa->aa_oa->o_valid & OBD_MD_FLFLAGS &&
+ aa->aa_oa->o_flags & OBD_FL_MMAP) {
+ rc = 0;
+ } else {
+ rc = osc_brw_redo_request(req, aa);
+ if (rc == 0)
+ RETURN(0);
+ }
}
if (aa->aa_ocapa) {
}
OBDO_FREE(aa->aa_oa);
} else { /* from async_internal() */
- int i;
+ obd_count i;
for (i = 0; i < aa->aa_page_count; i++)
osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
-
- if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
- OBDO_FREE(aa->aa_oa);
}
osc_wake_cache_waiters(cli);
osc_check_rpcs(env, cli);
if (!async)
cl_req_completion(env, aa->aa_clerq, rc);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
+
RETURN(rc);
}
enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
struct ldlm_lock *lock = NULL;
struct cl_req_attr crattr;
- int i, rc;
+ int i, rc, mpflag = 0;
ENTRY;
LASSERT(!cfs_list_empty(rpc_list));
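+ /* save the current memory-pressure state and mark the task as
+ * under pressure while the RPC is built; restored at out: below */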
+ if (cmd & OBD_BRW_MEMALLOC)
+ mpflag = cfs_memory_pressure_get_and_set();
+
memset(&crattr, 0, sizeof crattr);
OBD_ALLOC(pga, sizeof(*pga) * page_count);
if (pga == NULL)
GOTO(out, req = ERR_PTR(rc));
}
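+ /* tag the request so ptlrpc knows it was built under memory
+ * pressure */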
+ if (cmd & OBD_BRW_MEMALLOC)
+ req->rq_memalloc = 1;
+
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
* later setattr before earlier BRW (as determined by the request xid),
CFS_INIT_LIST_HEAD(rpc_list);
aa->aa_clerq = clerq;
out:
+ if (cmd & OBD_BRW_MEMALLOC)
+ cfs_memory_pressure_restore(mpflag);
+
capa_put(crattr.cra_capa);
if (IS_ERR(req)) {
if (oa)
* \param cmd OBD_BRW_* macros
* \param lop pending pages
*
- * \return zero if pages successfully add to send queue.
- * \return not zere if error occurring.
+ * \return zero if no pages were added to the send queue.
+ * \return 1 if pages were successfully added to the send queue.
+ * \return negative on error.
*/
static int
osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_brw_async_args *aa;
const struct obd_async_page_ops *ops;
CFS_LIST_HEAD(rpc_list);
- CFS_LIST_HEAD(tmp_list);
- unsigned int ending_offset;
- unsigned starting_offset = 0;
- int srvlock = 0;
+ int srvlock = 0, mem_tight = 0;
struct cl_object *clob = NULL;
+ obd_off starting_offset = OBD_OBJECT_EOF;
+ unsigned int ending_offset;
+ int starting_page_off = 0;
ENTRY;
/* ASYNC_HP pages first. At present, when a lock covering the pages is
* to be canceled, the pages covered by the lock will be sent out
* with ASYNC_HP. We have to send them out as soon as possible. */
cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
- if (oap->oap_async_flags & ASYNC_HP)
- cfs_list_move(&oap->oap_pending_item, &tmp_list);
- else
- cfs_list_move_tail(&oap->oap_pending_item, &tmp_list);
+ if (oap->oap_async_flags & ASYNC_HP)
+ cfs_list_move(&oap->oap_pending_item, &lop->lop_pending);
if (++page_count >= cli->cl_max_pages_per_rpc)
break;
}
-
- cfs_list_splice(&tmp_list, &lop->lop_pending);
page_count = 0;
/* first we find the pages we're allowed to work with */
/* If there is a gap at the start of this page, it can't merge
* with any previous page, so we'll hand the network a
* "fragmented" page array that it can't transfer in 1 RDMA */
- if (page_count != 0 && oap->oap_page_off != 0)
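+ /* remember the lowest object offset seen so far; only the first
+ * page of the RPC may start part-way into a page */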
+ if (oap->oap_obj_off < starting_offset) {
+ if (starting_page_off != 0)
+ break;
+
+ starting_page_off = oap->oap_page_off;
+ starting_offset = oap->oap_obj_off + starting_page_off;
+ } else if (oap->oap_page_off != 0)
break;
/* in llite being 'ready' equates to the page being locked
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
* as the call returns. if we race with commit_write giving
- * us that page we dont' want to create a hole in the page
+ * us that page we don't want to create a hole in the page
* stream, so we stop and leave the rpc to be fired by
* another dirtier or kupdated interval (the not ready page
* will still be on the dirty list). we could call in
lop_update_pending(cli, lop, cmd, -1);
cfs_list_del_init(&oap->oap_urgent_item);
- if (page_count == 0)
- starting_offset = (oap->oap_obj_off+oap->oap_page_off) &
- (PTLRPC_MAX_BRW_SIZE - 1);
-
/* ask the caller for the size of the io as the rpc leaves. */
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count =
/* now put the page back in our accounting */
cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (page_count == 0)
+ if (page_count++ == 0)
srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
- if (++page_count >= cli->cl_max_pages_per_rpc)
- break;
+
+ if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
+ mem_tight = 1;
/* End on a PTLRPC_MAX_BRW_SIZE boundary. We want full-sized
* RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
* have the same alignment as the initial writes that allocated
* extents on the server. */
- ending_offset = (oap->oap_obj_off + oap->oap_page_off +
- oap->oap_count) & (PTLRPC_MAX_BRW_SIZE - 1);
- if (ending_offset == 0)
+ ending_offset = oap->oap_obj_off + oap->oap_page_off +
+ oap->oap_count;
+ if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
+ break;
+
+ if (page_count >= cli->cl_max_pages_per_rpc)
break;
/* If there is a gap at the end of this page, it can't merge
RETURN(0);
}
- req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
+ req = osc_build_req(env, cli, &rpc_list, page_count,
+ mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
if (IS_ERR(req)) {
LASSERT(cfs_list_empty(&rpc_list));
loi_list_maint(cli, loi);
aa = ptlrpc_req_async_args(req);
+ starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
if (rc > 0)
race_counter = 0;
- else
+ else if (rc == 0)
race_counter++;
}
if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
if (rc > 0)
race_counter = 0;
- else
+ else if (rc == 0)
race_counter++;
}
- /* attempt some inter-object balancing by issueing rpcs
+ /* attempt some inter-object balancing by issuing rpcs
* for each object in turn */
if (!cfs_list_empty(&loi->loi_hp_ready_item))
cfs_list_del_init(&loi->loi_hp_ready_item);
struct osc_async_page *oap)
{
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = { 0 };
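+ /* allow the wait for cache space to be interrupted by signals */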
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
ENTRY;
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream */
- if (cli->cl_dirty_max < CFS_PAGE_SIZE || cli->cl_ar.ar_force_sync ||
- loi->loi_ar.ar_force_sync)
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
+ cli->cl_dirty_max < CFS_PAGE_SIZE ||
+ cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
RETURN(-EDQUOT);
/* Hopefully normal case - cache space and write credits available */
RETURN(0);
}
-struct osc_async_page *oap_from_cookie(void *cookie)
-{
- struct osc_async_page *oap = cookie;
- if (oap->oap_magic != OAP_MAGIC)
- return ERR_PTR(-EINVAL);
- return oap;
-};
-
-int osc_queue_async_io(const struct lu_env *env,
- struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- int cmd, obd_off off, int count,
- obd_flag brw_flags, enum async_flags async_flags)
+int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
+ struct lov_stripe_md *lsm, struct lov_oinfo *loi,
+ struct osc_async_page *oap, int cmd, int off,
+ int count, obd_flag brw_flags, enum async_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
- struct osc_async_page *oap;
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ if (oap->oap_magic != OAP_MAGIC)
+ RETURN(-EINVAL);
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
/* Give a hint to OST that requests are coming from kswapd - bug19529 */
- if (libcfs_memory_pressure_get())
+ if (cfs_memory_pressure_get())
oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = async_flags;
RETURN(0);
}
-int osc_teardown_async_page(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie)
+int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct lov_oinfo *loi, struct osc_async_page *oap)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct loi_oap_pages *lop;
- struct osc_async_page *oap;
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ if (oap->oap_magic != OAP_MAGIC)
+ RETURN(-EINVAL);
if (loi == NULL)
loi = lsm->lsm_oinfo[0];
RETURN(rc);
}
-static void osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo,
- int flags)
+static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
+ struct ldlm_enqueue_info *einfo)
{
void *data = einfo->ei_cbdata;
+ int set = 0;
LASSERT(lock != NULL);
LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
lock_res_and_lock(lock);
cfs_spin_lock(&osc_ast_guard);
- LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data);
- lock->l_ast_data = data;
+
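+ /* only set l_ast_data once; report whether it now points at
+ * our data so the caller knows the lock matches this object */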
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
+
cfs_spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
+
+ return set;
}
-static void osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo,
- int flags)
+static int osc_set_data_with_check(struct lustre_handle *lockh,
+ struct ldlm_enqueue_info *einfo)
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ int set = 0;
if (lock != NULL) {
- osc_set_lock_data_with_check(lock, einfo, flags);
+ set = osc_set_lock_data_with_check(lock, einfo);
LDLM_LOCK_PUT(lock);
} else
CERROR("lockh %p, data %p - client evicted?\n",
lockh, einfo->ei_cbdata);
+ return set;
}
static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
* osc_enqueue_fini(). */
ldlm_lock_addref(&handle, mode);
+ /* Let the CP AST grant the lock first. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+
/* Complete obtaining the lock procedure. */
rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
mode, aa->oa_flags, aa->oa_lvb,
if (mode) {
struct ldlm_lock *matched = ldlm_handle2lock(lockh);
- if (matched->l_ast_data == NULL ||
- matched->l_ast_data == einfo->ei_cbdata) {
+ if (osc_set_lock_data_with_check(matched, einfo)) {
/* addref the lock only if not async requests and PW
* lock is matched whereas we asked for PR. */
if (!rqset && einfo->ei_mode != mode)
ldlm_lock_addref(lockh, LCK_PR);
- osc_set_lock_data_with_check(matched, einfo, *flags);
if (intent) {
/* I would like to be able to ASSERT here that
* rss <= kms, but I can't, for reasons which
RETURN(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
- if (rc)
+ if (rc) {
+ ptlrpc_request_free(req);
RETURN(rc);
+ }
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
sizeof *lvb);
rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
if (rc) {
- if (data != NULL)
- osc_set_data_with_check(lockh, data, lflags);
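+ /* if the lock is already bound to another object's data, treat
+ * it as no match and drop the reference taken by
+ * ldlm_lock_match() (none is held for TEST_LOCK matches) */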
+ if (data != NULL) {
+ if (!osc_set_data_with_check(lockh, data)) {
+ if (!(lflags & LDLM_FL_TEST_LOCK))
+ ldlm_lock_decref(lockh, rc);
+ RETURN(0);
+ }
+ }
if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
ldlm_lock_addref(lockh, LCK_PR);
ldlm_lock_decref(lockh, LCK_PW);
* avail < ~0.1% max max = avail + used
* 1025 * avail < avail + used used = blocks - free
* 1024 * avail < used
- * 1024 * avail < blocks - free
- * avail < ((blocks - free) >> 10)
+ * 1024 * avail < blocks - free
+ * avail < ((blocks - free) >> 10)
*
* On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want to
* lose that amount of space so in those cases we report no space left
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- OBD_ALLOC_PTR(oa);
+ OBDO_ALLOC(oa);
if (!oa) {
ptlrpc_req_finished(req);
RETURN(-ENOMEM);
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
break;
}
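+ /* pass import deactivate/activate events up to the obd observer */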
+ case IMP_EVENT_DEACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
+ break;
+ }
+ case IMP_EVENT_ACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
+ break;
+ }
default:
CERROR("Unknown import event %d\n", event);
LBUG();
RETURN(rc);
}
+/**
+ * Determine whether the lock can be canceled before replaying the lock
+ * during recovery, see bug16774 for detailed information.
+ *
+ * \retval zero the lock can't be canceled
+ * \retval other ok to cancel
+ */
+static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ check_res_locked(lock->l_resource);
+
+ /*
+ * Cancel all unused extent locks in granted mode LCK_PR or LCK_CR.
+ *
+ * XXX as a future improvement, we can also cancel unused write lock
+ * if it doesn't have dirty data and active mmaps.
+ */
+ if (lock->l_resource->lr_type == LDLM_EXTENT &&
+ (lock->l_granted_mode == LCK_PR ||
+ lock->l_granted_mode == LCK_CR) &&
+ (osc_dlm_lock_pageref(lock) == 0))
+ RETURN(1);
+
+ RETURN(0);
+}
+
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int rc;
CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
cfs_sema_init(&cli->cl_grant_sem, 1);
+
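+ /* allow unused read locks to be canceled instead of replayed
+ * during recovery; see osc_cancel_for_recovery() above */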
+ ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
}
RETURN(rc);
break;
}
case OBD_CLEANUP_EXPORTS: {
- /* If we set up but never connected, the
- client import will not have been cleaned. */
- if (obd->u.cli.cl_import) {
- struct obd_import *imp;
- cfs_down_write(&obd->u.cli.cl_sem);
- imp = obd->u.cli.cl_import;
- CDEBUG(D_CONFIG, "%s: client import never connected\n",
- obd->obd_name);
- ptlrpc_invalidate_import(imp);
- if (imp->imp_rq_pool) {
- ptlrpc_free_rq_pool(imp->imp_rq_pool);
- imp->imp_rq_pool = NULL;
- }
- class_destroy_import(imp);
- cfs_up_write(&obd->u.cli.cl_sem);
- obd->u.cli.cl_import = NULL;
- }
+ /* LU-464
+ * For the echo client, the export may be on the zombie list; wait
+ * for the zombie thread to cull it, because cli.cl_import will be
+ * cleared in client_disconnect_export():
+ * class_export_destroy() -> obd_cleanup() ->
+ * echo_device_free() -> echo_client_cleanup() ->
+ * obd_disconnect() -> osc_disconnect() ->
+ * client_disconnect_export()
+ */
+ obd_zombie_barrier();
+ obd_cleanup_client_import(obd);
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");