oinfo->oi_cb_up, oinfo, rqset);
}
-static int osc_sync(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *md, obd_size start, obd_size end,
- void *capa)
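+/* Reply interpreter for the asynchronous OST_SYNC request: unpack the
+ * obdo returned by the OST, copy it back into the caller's obd_info and
+ * run the caller's completion callback with the final result. */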
+static int osc_sync_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *arg, int rc)
+{
+ struct osc_async_args *aa = arg;
+ struct ost_body *body;
+ ENTRY;
+
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL) {
+                CERROR("can't unpack ost_body\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ *aa->aa_oi->oi_oa = body->oa;
+out:
+ rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ RETURN(rc);
+}
+
+static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
+ obd_size start, obd_size end,
+ struct ptlrpc_request_set *set)
{
struct ptlrpc_request *req;
struct ost_body *body;
+ struct osc_async_args *aa;
int rc;
ENTRY;
- if (!oa) {
+ if (!oinfo->oi_oa) {
CDEBUG(D_INFO, "oa NULL\n");
RETURN(-EINVAL);
}
if (req == NULL)
RETURN(-ENOMEM);
- osc_set_capa_size(req, &RMF_CAPA1, capa);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
if (rc) {
ptlrpc_request_free(req);
/* overload the size and blocks fields in the oa with start/end */
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- lustre_set_wire_obdo(&body->oa, oa);
+ lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
body->oa.o_size = start;
body->oa.o_blocks = end;
body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
- osc_pack_capa(req, body, capa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = osc_sync_interpret;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- GOTO(out, rc);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
-
- lustre_get_wire_obdo(oa, &body->oa);
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oi = oinfo;
- EXIT;
- out:
- ptlrpc_req_finished(req);
- return rc;
+ ptlrpc_set_add_req(set, req);
+        RETURN(0);
}
/* Find and cancel locally locks matched by @mode in the resource found by
LASSERT(body);
osc_update_grant(cli, body);
out:
- OBD_FREE_PTR(oa);
+ OBDO_FREE(oa);
return rc;
}
oa->o_grant = cli->cl_avail_grant / 4;
cli->cl_avail_grant -= oa->o_grant;
client_obd_list_unlock(&cli->cl_loi_list_lock);
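+        /* o_flags is only meaningful when OBD_MD_FLFLAGS is set; initialise
+         * it to zero before OR-ing in OBD_FL_SHRINK_GRANT so stale bits are
+         * not sent to the OST. */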
+ if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ oa->o_flags = 0;
+ }
oa->o_flags |= OBD_FL_SHRINK_GRANT;
osc_update_next_shrink(cli);
}
body->oa.o_grant = cli->cl_avail_grant - target;
cli->cl_avail_grant = target;
client_obd_list_unlock(&cli->cl_loi_list_lock);
+ if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
osc_update_next_shrink(cli);
cli->cl_avail_grant = ocd->ocd_grant;
else
cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+
+ if (cli->cl_avail_grant < 0) {
+                CWARN("%s: available grant < 0, the OSS is probably not running"
+                      " with the patch from bug20278 (%ld)\n",
+ cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
+ /* workaround for 1.6 servers which do not have
+ * the patch from bug20278 */
+ cli->cl_avail_grant = ocd->ocd_grant;
+ }
+
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
+ cli->cl_import->imp_obd->obd_name,
cli->cl_avail_grant, cli->cl_lost_grant);
- LASSERT(cli->cl_avail_grant >= 0);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
cfs_list_empty(&cli->cl_grant_shrink_list))
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT|
- OBD_BRW_NOCACHE|OBD_BRW_SYNC);
+ OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine */
&RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
- if (resend)
- body->oa.o_valid |= OBD_FL_RECOV_RESEND;
+ if (resend) {
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= OBD_FL_RECOV_RESEND;
+ }
if (osc_should_shrink_grant(cli))
osc_shrink_grant_local(cli, &body->oa);
return 0;
}
+        /* If this is an mmapped file it can be changed at any time */
+ if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP)
+ return 1;
+
if (oa->o_valid & OBD_MD_FLFLAGS)
cksum_type = cksum_type_unpack(oa->o_flags);
else
ptlrpc_req_finished(req);
if (osc_recoverable_error(rc)) {
resends++;
- if (!osc_should_resend(resends, &exp->exp_obd->u.cli)) {
+ if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
CERROR("too many resend retries, returning error\n");
RETURN(-EIO);
}
int rc = 0;
ENTRY;
- if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
+ if (!client_should_resend(aa->aa_resends, aa->aa_cli)) {
CERROR("too many resent retries, returning error\n");
RETURN(-EIO);
}
if (cmd & OBD_BRW_WRITE) {
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
- * create more pages to coallesce with what's waiting.. */
+ * create more pages to coalesce with what's waiting.. */
if (!cfs_list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
if (osc_recoverable_error(rc)) {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
+                /* Only retry once for mmapped files since the mmapped page
+                 * might be modified at any time. We have to retry at least
+                 * once in case there WAS really a corruption of the page
+                 * on the network that was not caused by mmap() modifying
+                 * the page. Bug11742 */
+ if ((rc == -EAGAIN) && (aa->aa_resends > 0) &&
+ aa->aa_oa->o_valid & OBD_MD_FLFLAGS &&
+ aa->aa_oa->o_flags & OBD_FL_MMAP) {
+ rc = 0;
+ } else {
+ rc = osc_brw_redo_request(req, aa);
+ if (rc == 0)
+ RETURN(0);
+ }
}
if (aa->aa_ocapa) {
}
OBDO_FREE(aa->aa_oa);
} else { /* from async_internal() */
- int i;
+ obd_count i;
for (i = 0; i < aa->aa_page_count; i++)
osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
-
- if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
- OBDO_FREE(aa->aa_oa);
}
osc_wake_cache_waiters(cli);
osc_check_rpcs(env, cli);
if (!async)
cl_req_completion(env, aa->aa_clerq, rc);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
+
RETURN(rc);
}
enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
struct ldlm_lock *lock = NULL;
struct cl_req_attr crattr;
- int i, rc;
+ int i, rc, mpflag = 0;
ENTRY;
LASSERT(!cfs_list_empty(rpc_list));
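+        /* If any page in this RPC was queued under memory pressure, build
+         * the request with the memory-pressure flag set so allocations may
+         * dip into emergency reserves; the previous state is restored at
+         * "out". */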
+ if (cmd & OBD_BRW_MEMALLOC)
+ mpflag = cfs_memory_pressure_get_and_set();
+
memset(&crattr, 0, sizeof crattr);
OBD_ALLOC(pga, sizeof(*pga) * page_count);
if (pga == NULL)
GOTO(out, req = ERR_PTR(rc));
}
+ if (cmd & OBD_BRW_MEMALLOC)
+ req->rq_memalloc = 1;
+
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
* later setattr before earlier BRW (as determined by the request xid),
CFS_INIT_LIST_HEAD(rpc_list);
aa->aa_clerq = clerq;
out:
+ if (cmd & OBD_BRW_MEMALLOC)
+ cfs_memory_pressure_restore(mpflag);
+
capa_put(crattr.cra_capa);
if (IS_ERR(req)) {
if (oa)
* \param cmd OBD_BRW_* macroses
* \param lop pending pages
*
- * \return zero if pages successfully add to send queue.
- * \return not zere if error occurring.
+ * \return zero if no page added to send queue.
+ * \return 1 if pages successfully added to send queue.
+ * \return negative on errors.
*/
static int
osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
CFS_LIST_HEAD(tmp_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
- int srvlock = 0;
+ int srvlock = 0, mem_tight = 0;
struct cl_object *clob = NULL;
ENTRY;
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
* as the call returns. if we race with commit_write giving
- * us that page we dont' want to create a hole in the page
+ * us that page we don't want to create a hole in the page
* stream, so we stop and leave the rpc to be fired by
* another dirtier or kupdated interval (the not ready page
* will still be on the dirty list). we could call in
/* now put the page back in our accounting */
cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
+ mem_tight = 1;
if (page_count == 0)
srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
if (++page_count >= cli->cl_max_pages_per_rpc)
RETURN(0);
}
- req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
+ req = osc_build_req(env, cli, &rpc_list, page_count,
+ mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
if (IS_ERR(req)) {
LASSERT(cfs_list_empty(&rpc_list));
loi_list_maint(cli, loi);
race_counter++;
}
- /* attempt some inter-object balancing by issueing rpcs
+ /* attempt some inter-object balancing by issuing rpcs
* for each object in turn */
if (!cfs_list_empty(&loi->loi_hp_ready_item))
cfs_list_del_init(&loi->loi_hp_ready_item);
struct osc_async_page *oap)
{
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = { 0 };
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
ENTRY;
RETURN(0);
}
-struct osc_async_page *oap_from_cookie(void *cookie)
-{
- struct osc_async_page *oap = cookie;
- if (oap->oap_magic != OAP_MAGIC)
- return ERR_PTR(-EINVAL);
- return oap;
-};
-
-int osc_queue_async_io(const struct lu_env *env,
- struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- int cmd, obd_off off, int count,
- obd_flag brw_flags, enum async_flags async_flags)
+int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
+ struct lov_stripe_md *lsm, struct lov_oinfo *loi,
+ struct osc_async_page *oap, int cmd, obd_off off,
+ int count, obd_flag brw_flags,
+ enum async_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
- struct osc_async_page *oap;
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ if (oap->oap_magic != OAP_MAGIC)
+ RETURN(-EINVAL);
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
/* Give a hint to OST that requests are coming from kswapd - bug19529 */
- if (libcfs_memory_pressure_get())
+ if (cfs_memory_pressure_get())
oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
cfs_spin_lock(&oap->oap_lock);
oap->oap_async_flags = async_flags;
RETURN(0);
}
-int osc_teardown_async_page(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie)
+int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct lov_oinfo *loi, struct osc_async_page *oap)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct loi_oap_pages *lop;
- struct osc_async_page *oap;
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ if (oap->oap_magic != OAP_MAGIC)
+ RETURN(-EINVAL);
if (loi == NULL)
loi = lsm->lsm_oinfo[0];
RETURN(rc);
}
-static void osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo,
- int flags)
+static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
+ struct ldlm_enqueue_info *einfo)
{
void *data = einfo->ei_cbdata;
+ int set = 0;
LASSERT(lock != NULL);
LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
lock_res_and_lock(lock);
cfs_spin_lock(&osc_ast_guard);
- LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data);
- lock->l_ast_data = data;
+
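+        /* Bind the lock to this object only if it is not already bound;
+         * report whether the lock now carries the caller's data so the
+         * caller can detect a lock already owned by another object. */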
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
+
cfs_spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
+
+ return set;
}
-static void osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo,
- int flags)
+static int osc_set_data_with_check(struct lustre_handle *lockh,
+ struct ldlm_enqueue_info *einfo)
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ int set = 0;
if (lock != NULL) {
- osc_set_lock_data_with_check(lock, einfo, flags);
+ set = osc_set_lock_data_with_check(lock, einfo);
LDLM_LOCK_PUT(lock);
} else
CERROR("lockh %p, data %p - client evicted?\n",
lockh, einfo->ei_cbdata);
+ return set;
}
static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
* osc_enqueue_fini(). */
ldlm_lock_addref(&handle, mode);
+        /* Let the CP AST grant the lock first. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+
/* Complete obtaining the lock procedure. */
rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
mode, aa->oa_flags, aa->oa_lvb,
if (mode) {
struct ldlm_lock *matched = ldlm_handle2lock(lockh);
- if (matched->l_ast_data == NULL ||
- matched->l_ast_data == einfo->ei_cbdata) {
+ if (osc_set_lock_data_with_check(matched, einfo)) {
/* addref the lock only if not async requests and PW
* lock is matched whereas we asked for PR. */
if (!rqset && einfo->ei_mode != mode)
ldlm_lock_addref(lockh, LCK_PR);
- osc_set_lock_data_with_check(matched, einfo, *flags);
if (intent) {
/* I would like to be able to ASSERT here that
* rss <= kms, but I can't, for reasons which
RETURN(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
- if (rc)
+ if (rc) {
+ ptlrpc_request_free(req);
RETURN(rc);
+ }
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
sizeof *lvb);
rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
if (rc) {
- if (data != NULL)
- osc_set_data_with_check(lockh, data, lflags);
+ if (data != NULL) {
+ if (!osc_set_data_with_check(lockh, data)) {
+ if (!(lflags & LDLM_FL_TEST_LOCK))
+ ldlm_lock_decref(lockh, rc);
+ RETURN(0);
+ }
+ }
if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
ldlm_lock_addref(lockh, LCK_PR);
ldlm_lock_decref(lockh, LCK_PW);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- OBD_ALLOC_PTR(oa);
+ OBDO_ALLOC(oa);
if (!oa) {
ptlrpc_req_finished(req);
RETURN(-ENOMEM);
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
break;
}
+ case IMP_EVENT_DEACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
+ break;
+ }
+ case IMP_EVENT_ACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
+ break;
+ }
default:
CERROR("Unknown import event %d\n", event);
LBUG();
RETURN(rc);
}
+/**
+ * Determine whether the lock can be canceled before replaying the lock
+ * during recovery, see bug16774 for detailed information.
+ *
+ * \retval zero the lock can't be canceled
+ * \retval other ok to cancel
+ */
+static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ check_res_locked(lock->l_resource);
+
+ /*
+     * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
+     *
+     * XXX as a future improvement, we could also cancel unused write locks
+     * if they have no dirty data and no active mmaps.
+ */
+ if (lock->l_resource->lr_type == LDLM_EXTENT &&
+ (lock->l_granted_mode == LCK_PR ||
+ lock->l_granted_mode == LCK_CR) &&
+ (osc_dlm_lock_pageref(lock) == 0))
+ RETURN(1);
+
+ RETURN(0);
+}
+
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int rc;
CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
cfs_sema_init(&cli->cl_grant_sem, 1);
+
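+        /* Register the callback that decides which locks may be cancelled
+         * locally instead of being replayed during recovery. */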
+ ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
}
RETURN(rc);
break;
}
case OBD_CLEANUP_EXPORTS: {
+ /* LU-464
+                 * For the echo client the export may be on the zombie list;
+                 * wait for the zombie thread to cull it, because
+                 * cli.cl_import will be cleared in client_disconnect_export():
+ * class_export_destroy() -> obd_cleanup() ->
+ * echo_device_free() -> echo_client_cleanup() ->
+ * obd_disconnect() -> osc_disconnect() ->
+ * client_disconnect_export()
+ */
+ obd_zombie_barrier();
/* If we set up but never connected, the
client import will not have been cleaned. */
if (obd->u.cli.cl_import) {