X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fosc%2Fosc_request.c;h=a01df94bab5b88d863fa805b09b38e8d16868524;hb=655d5e708e01a77368459a1dd9c34fab5b6838b1;hp=22bf7d9d219294e2b7d02d2ce40a8314e7424d4a;hpb=f95393b0d0a59cf3dc2f29cffc35dcc4cc9d7728;p=fs%2Flustre-release.git diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c index 22bf7d9..a01df94 100644 --- a/lustre/osc/osc_request.c +++ b/lustre/osc/osc_request.c @@ -864,6 +864,17 @@ static unsigned long rpcs_in_flight(struct client_obd *cli) return cli->cl_r_in_flight + cli->cl_w_in_flight; } +int osc_wake_sync_fs(struct client_obd *cli) +{ + ENTRY; + if (cfs_list_empty(&cli->cl_loi_sync_fs_list) && + cli->cl_sf_wait.started) { + cli->cl_sf_wait.sfw_upcall(cli->cl_sf_wait.sfw_oi, 0); + cli->cl_sf_wait.started = 0; + } + RETURN(0); +} + /* caller must hold loi_list_lock */ void osc_wake_cache_waiters(struct client_obd *cli) { @@ -943,7 +954,7 @@ static int osc_shrink_grant_interpret(const struct lu_env *env, LASSERT(body); osc_update_grant(cli, body); out: - OBD_FREE_PTR(oa); + OBDO_FREE(oa); return rc; } @@ -953,6 +964,10 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) oa->o_grant = cli->cl_avail_grant / 4; cli->cl_avail_grant -= oa->o_grant; client_obd_list_unlock(&cli->cl_loi_list_lock); + if (!(oa->o_valid & OBD_MD_FLFLAGS)) { + oa->o_valid |= OBD_MD_FLFLAGS; + oa->o_flags = 0; + } oa->o_flags |= OBD_FL_SHRINK_GRANT; osc_update_next_shrink(cli); } @@ -1003,6 +1018,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, long target) body->oa.o_grant = cli->cl_avail_grant - target; cli->cl_avail_grant = target; client_obd_list_unlock(&cli->cl_loi_list_lock); + if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) { + body->oa.o_valid |= OBD_MD_FLFLAGS; + body->oa.o_flags = 0; + } body->oa.o_flags |= OBD_FL_SHRINK_GRANT; osc_update_next_shrink(cli); @@ -1020,6 +1039,11 @@ static int osc_should_shrink_grant(struct client_obd *client) { cfs_time_t time = cfs_time_current(); cfs_time_t next_shrink = client->cl_next_shrink_grant; + + if ((client->cl_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_GRANT_SHRINK) == 0) + return 0; + if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) { if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > GRANT_SHRINK_LIMIT) @@ -1082,11 +1106,21 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_avail_grant = ocd->ocd_grant; else cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty; + + if (cli->cl_avail_grant < 0) { + CWARN("%s: available grant < 0, the OSS is probably not running" + " with patch from bug20278 (%ld) \n", + cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant); + /* workaround for 1.6 servers which do not have + * the patch from bug20278 */ + cli->cl_avail_grant = ocd->ocd_grant; + } + client_obd_list_unlock(&cli->cl_loi_list_lock); - CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n", + CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n", + cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, cli->cl_lost_grant); - LASSERT(cli->cl_avail_grant >= 0); if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK && cfs_list_empty(&cli->cl_grant_shrink_list)) @@ -1172,7 +1206,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) { if (p1->flag != p2->flag) { unsigned mask = ~(OBD_BRW_FROM_GRANT| - OBD_BRW_NOCACHE|OBD_BRW_SYNC); + OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC); /* warn if we try to combine 
flags that we don't know to be * safe to combine */ @@ -1225,7 +1259,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, struct lov_stripe_md *lsm, obd_count page_count, struct brw_page **pga, struct ptlrpc_request **reqp, - struct obd_capa *ocapa, int reserve) + struct obd_capa *ocapa, int reserve, + int resend) { struct ptlrpc_request *req; struct ptlrpc_bulk_desc *desc; @@ -1341,6 +1376,14 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount)); osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0); + if (resend) { + if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { + body->oa.o_valid |= OBD_MD_FLFLAGS; + body->oa.o_flags = 0; + } + body->oa.o_flags |= OBD_FL_RECOV_RESEND; + } + if (osc_should_shrink_grant(cli)) osc_shrink_grant_local(cli, &body->oa); @@ -1422,6 +1465,10 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, return 0; } + /* If this is mmaped file - it can be changed at any time */ + if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP) + return 1; + if (oa->o_valid & OBD_MD_FLFLAGS) cksum_type = cksum_type_unpack(oa->o_flags); else @@ -1495,14 +1542,14 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) } #endif + osc_update_grant(cli, body); + if (rc < 0) RETURN(rc); if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM) client_cksum = aa->aa_oa->o_cksum; /* save for later */ - osc_update_grant(cli, body); - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) { if (rc > 0) { CERROR("Unexpected +ve rc %d\n", rc); @@ -1639,7 +1686,7 @@ static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa, restart_bulk: rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm, - page_count, pga, &req, ocapa, 0); + page_count, pga, &req, ocapa, 0, resends); if (rc != 0) return (rc); @@ -1681,7 +1728,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request, ENTRY; if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) { - CERROR("too many resend retries, returning error\n"); + CERROR("too many resent retries, returning error\n"); RETURN(-EIO); } @@ -1692,7 +1739,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request, aa->aa_cli, aa->aa_oa, NULL /* lsm unused by osc currently */, aa->aa_page_count, aa->aa_ppga, - &new_req, aa->aa_ocapa, 0); + &new_req, aa->aa_ocapa, 0, 1); if (rc) RETURN(rc); @@ -1905,6 +1952,24 @@ static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap, osc_release_write_grant(cli, &oap->oap_brw_page, sent); } +static int lop_makes_syncfs_rpc(struct loi_oap_pages *lop) +{ + struct osc_async_page *oap; + ENTRY; + + if (cfs_list_empty(&lop->lop_urgent)) + RETURN(0); + + oap = cfs_list_entry(lop->lop_urgent.next, + struct osc_async_page, oap_urgent_item); + + if (oap->oap_async_flags & ASYNC_SYNCFS) { + CDEBUG(D_CACHE, "syncfs request forcing RPC\n"); + RETURN(1); + } + + RETURN(0); +} /* This maintains the lists of pending pages to read/write for a given object * (lop). This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint() @@ -1938,7 +2003,7 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop, if (cmd & OBD_BRW_WRITE) { /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coallesce with what's waiting.. */ + * create more pages to coalesce with what's waiting.. 
*/ if (!cfs_list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); RETURN(1); @@ -1993,10 +2058,19 @@ void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi) on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0); on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1); } else { - on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0); - on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, - lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)|| - lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)); + if (lop_makes_syncfs_rpc(&loi->loi_write_lop)) { + on_list(&loi->loi_sync_fs_item, + &cli->cl_loi_sync_fs_list, + loi->loi_write_lop.lop_num_pending); + } else { + on_list(&loi->loi_hp_ready_item, + &cli->cl_loi_hp_ready_list, 0); + on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, + lop_makes_rpc(cli, &loi->loi_write_lop, + OBD_BRW_WRITE)|| + lop_makes_rpc(cli, &loi->loi_read_lop, + OBD_BRW_READ)); + } } on_list(&loi->loi_write_item, &cli->cl_loi_write_list, @@ -2084,6 +2158,34 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, ar->ar_force_sync = 0; } +static int osc_add_to_lop_urgent(struct loi_oap_pages *lop, + struct osc_async_page *oap, + obd_flag async_flags) +{ + + /* If true, then already present in lop urgent */ + if (!cfs_list_empty(&oap->oap_urgent_item)) { + CWARN("Request to add duplicate oap_urgent for flag = %d\n", + oap->oap_async_flags); + return 1; + } + + /* item from sync_fs, to avoid duplicates check the existing flags */ + if (async_flags & ASYNC_SYNCFS) { + cfs_list_add_tail(&oap->oap_urgent_item, + &lop->lop_urgent); + return 0; + } + + if (oap->oap_async_flags & ASYNC_HP) + cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent); + else if (oap->oap_async_flags & ASYNC_URGENT || + async_flags & ASYNC_URGENT) + cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent); + + return 0; +} + void osc_oap_to_pending(struct osc_async_page *oap) { struct loi_oap_pages *lop; @@ -2093,10 +2195,7 @@ void osc_oap_to_pending(struct osc_async_page *oap) else lop = &oap->oap_loi->loi_read_lop; - if (oap->oap_async_flags & ASYNC_HP) - cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent); - else if (oap->oap_async_flags & ASYNC_URGENT) - cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent); + osc_add_to_lop_urgent(lop, oap, 0); cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending); lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1); } @@ -2162,9 +2261,20 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); if (osc_recoverable_error(rc)) { - rc = osc_brw_redo_request(req, aa); - if (rc == 0) - RETURN(0); + /* Only retry once for mmaped files since the mmaped page + * might be modified at anytime. We have to retry at least + * once in case there WAS really a corruption of the page + * on the network, that was not caused by mmap() modifying + * the page. 
Bug11742 */ + if ((rc == -EAGAIN) && (aa->aa_resends > 0) && + aa->aa_oa->o_valid & OBD_MD_FLFLAGS && + aa->aa_oa->o_flags & OBD_FL_MMAP) { + rc = 0; + } else { + rc = osc_brw_redo_request(req, aa); + if (rc == 0) + RETURN(0); + } } if (aa->aa_ocapa) { @@ -2196,19 +2306,18 @@ static int brw_interpret(const struct lu_env *env, } OBDO_FREE(aa->aa_oa); } else { /* from async_internal() */ - int i; + obd_count i; for (i = 0; i < aa->aa_page_count; i++) osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1); - - if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY) - OBDO_FREE(aa->aa_oa); } osc_wake_cache_waiters(cli); + osc_wake_sync_fs(cli); osc_check_rpcs(env, cli); client_obd_list_unlock(&cli->cl_loi_list_lock); if (!async) cl_req_completion(env, aa->aa_clerq, rc); osc_release_ppga(aa->aa_ppga, aa->aa_page_count); + RETURN(rc); } @@ -2230,11 +2339,14 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ; struct ldlm_lock *lock = NULL; struct cl_req_attr crattr; - int i, rc; + int i, rc, mpflag = 0; ENTRY; LASSERT(!cfs_list_empty(rpc_list)); + if (cmd & OBD_BRW_MEMALLOC) + mpflag = cfs_memory_pressure_get_and_set(); + memset(&crattr, 0, sizeof crattr); OBD_ALLOC(pga, sizeof(*pga) * page_count); if (pga == NULL) @@ -2284,12 +2396,15 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, crattr.cra_capa, 1); + pga, &req, crattr.cra_capa, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); GOTO(out, req = ERR_PTR(rc)); } + if (cmd & OBD_BRW_MEMALLOC) + req->rq_memalloc = 1; + /* Need to update the timestamps after the request is built in case * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), @@ -2306,6 +2421,9 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, CFS_INIT_LIST_HEAD(rpc_list); aa->aa_clerq = clerq; out: + if (cmd & OBD_BRW_MEMALLOC) + cfs_memory_pressure_restore(mpflag); + capa_put(crattr.cra_capa); if (IS_ERR(req)) { if (oa) @@ -2340,8 +2458,9 @@ out: * \param cmd OBD_BRW_* macroses * \param lop pending pages * - * \return zero if pages successfully add to send queue. - * \return not zere if error occurring. + * \return zero if no page added to send queue. + * \return 1 if pages successfully added to send queue. + * \return negative on errors. */ static int osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, @@ -2357,7 +2476,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, CFS_LIST_HEAD(tmp_list); unsigned int ending_offset; unsigned starting_offset = 0; - int srvlock = 0; + int srvlock = 0, mem_tight = 0; struct cl_object *clob = NULL; ENTRY; @@ -2409,7 +2528,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, * until completion unlocks it. commit_write submits a page * as not ready because its unlock will happen unconditionally * as the call returns. if we race with commit_write giving - * us that page we dont' want to create a hole in the page + * us that page we don't want to create a hole in the page * stream, so we stop and leave the rpc to be fired by * another dirtier or kupdated interval (the not ready page * will still be on the dirty list). 
we could call in @@ -2501,6 +2620,8 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, /* now put the page back in our accounting */ cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list); + if (oap->oap_brw_flags & OBD_BRW_MEMALLOC) + mem_tight = 1; if (page_count == 0) srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK); if (++page_count >= cli->cl_max_pages_per_rpc) @@ -2523,7 +2644,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, } osc_wake_cache_waiters(cli); - + osc_wake_sync_fs(cli); loi_list_maint(cli, loi); client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -2536,7 +2657,8 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, RETURN(0); } - req = osc_build_req(env, cli, &rpc_list, page_count, cmd); + req = osc_build_req(env, cli, &rpc_list, page_count, + mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd); if (IS_ERR(req)) { LASSERT(cfs_list_empty(&rpc_list)); loi_list_maint(cli, loi); @@ -2615,6 +2737,9 @@ struct lov_oinfo *osc_next_loi(struct client_obd *cli) if (!cfs_list_empty(&cli->cl_loi_ready_list)) RETURN(cfs_list_entry(cli->cl_loi_ready_list.next, struct lov_oinfo, loi_ready_item)); + if (!cfs_list_empty(&cli->cl_loi_sync_fs_list)) + RETURN(cfs_list_entry(cli->cl_loi_sync_fs_list.next, + struct lov_oinfo, loi_sync_fs_item)); /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files @@ -2720,7 +2845,7 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) race_counter++; } - /* attempt some inter-object balancing by issueing rpcs + /* attempt some inter-object balancing by issuing rpcs * for each object in turn */ if (!cfs_list_empty(&loi->loi_hp_ready_item)) cfs_list_del_init(&loi->loi_hp_ready_item); @@ -2730,6 +2855,8 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) cfs_list_del_init(&loi->loi_write_item); if (!cfs_list_empty(&loi->loi_read_item)) cfs_list_del_init(&loi->loi_read_item); + if (!cfs_list_empty(&loi->loi_sync_fs_item)) + cfs_list_del_init(&loi->loi_sync_fs_item); loi_list_maint(cli, loi); @@ -2946,7 +3073,7 @@ int osc_queue_async_io(const struct lu_env *env, oap->oap_count = count; oap->oap_brw_flags = brw_flags; /* Give a hint to OST that requests are coming from kswapd - bug19529 */ - if (libcfs_memory_pressure_get()) + if (cfs_memory_pressure_get()) oap->oap_brw_flags |= OBD_BRW_MEMALLOC; cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags = async_flags; @@ -2994,16 +3121,27 @@ int osc_set_async_flags_base(struct client_obd *cli, if ((oap->oap_async_flags & async_flags) == async_flags) RETURN(0); + /* XXX: This introduces a tiny insignificant race for the case if this + * loi already had other urgent items. 
+ */ + if (SETTING(oap->oap_async_flags, async_flags, ASYNC_SYNCFS) && + cfs_list_empty(&oap->oap_rpc_item) && + cfs_list_empty(&oap->oap_urgent_item)) { + osc_add_to_lop_urgent(lop, oap, ASYNC_SYNCFS); + flags |= ASYNC_SYNCFS; + cfs_spin_lock(&oap->oap_lock); + oap->oap_async_flags |= flags; + cfs_spin_unlock(&oap->oap_lock); + loi_list_maint(cli, loi); + RETURN(0); + } + if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY)) flags |= ASYNC_READY; if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) && cfs_list_empty(&oap->oap_rpc_item)) { - if (oap->oap_async_flags & ASYNC_HP) - cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent); - else - cfs_list_add_tail(&oap->oap_urgent_item, - &lop->lop_urgent); + osc_add_to_lop_urgent(lop, oap, ASYNC_URGENT); flags |= ASYNC_URGENT; loi_list_maint(cli, loi); } @@ -3050,7 +3188,8 @@ int osc_teardown_async_page(struct obd_export *exp, if (!cfs_list_empty(&oap->oap_urgent_item)) { cfs_list_del_init(&oap->oap_urgent_item); cfs_spin_lock(&oap->oap_lock); - oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP); + oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP | + ASYNC_SYNCFS); cfs_spin_unlock(&oap->oap_lock); } if (!cfs_list_empty(&oap->oap_pending_item)) { @@ -3183,6 +3322,9 @@ static int osc_enqueue_interpret(const struct lu_env *env, * osc_enqueue_fini(). */ ldlm_lock_addref(&handle, mode); + /* Let CP AST to grant the lock first. */ + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1); + /* Complete obtaining the lock procedure. */ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1, mode, aa->oa_flags, aa->oa_lvb, @@ -3341,8 +3483,10 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, RETURN(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); - if (rc) + if (rc) { + ptlrpc_request_free(req); RETURN(rc); + } req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, sizeof *lvb); @@ -4042,7 +4186,7 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); - OBD_ALLOC_PTR(oa); + OBDO_ALLOC(oa); if (!oa) { ptlrpc_req_finished(req); RETURN(-ENOMEM); @@ -4316,6 +4460,32 @@ static int osc_import_event(struct obd_device *obd, RETURN(rc); } +/** + * Determine whether the lock can be canceled before replaying the lock + * during recovery, see bug16774 for detailed information. + * + * \retval zero the lock can't be canceled + * \retval other ok to cancel + */ +static int osc_cancel_for_recovery(struct ldlm_lock *lock) +{ + check_res_locked(lock->l_resource); + + /* + * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR. + * + * XXX as a future improvement, we can also cancel unused write lock + * if it doesn't have dirty data and active mmaps. 
+ */ + if (lock->l_resource->lr_type == LDLM_EXTENT && + (lock->l_granted_mode == LCK_PR || + lock->l_granted_mode == LCK_CR) && + (osc_dlm_lock_pageref(lock) == 0)) + RETURN(1); + + RETURN(0); +} + int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { int rc; @@ -4354,6 +4524,8 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list); cfs_sema_init(&cli->cl_grant_sem, 1); + + ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery); } RETURN(rc); @@ -4439,6 +4611,46 @@ int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg) return(rc); } +static int osc_sync_fs(struct obd_export *exp, struct obd_info *oinfo, + int wait) +{ + struct obd_device *obd = class_exp2obd(exp); + struct client_obd *cli; + struct lov_oinfo *loi; + struct lov_oinfo *tloi; + struct osc_async_page *oap; + struct osc_async_page *toap; + struct loi_oap_pages *lop; + struct lu_env *env; + int refcheck; + int rc = 0; + ENTRY; + + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + cli = &obd->u.cli; + client_obd_list_lock(&cli->cl_loi_list_lock); + cli->cl_sf_wait.sfw_oi = oinfo; + cli->cl_sf_wait.sfw_upcall = oinfo->oi_cb_up; + cli->cl_sf_wait.started = 1; + /* creating cl_loi_sync_fs list */ + cfs_list_for_each_entry_safe(loi, tloi, &cli->cl_loi_write_list, + loi_write_item) { + lop = &loi->loi_write_lop; + cfs_list_for_each_entry_safe(oap, toap, &lop->lop_pending, + oap_pending_item) + osc_set_async_flags_base(cli, loi, oap, ASYNC_SYNCFS); + } + osc_check_rpcs(env, cli); + osc_wake_sync_fs(cli); + client_obd_list_unlock(&cli->cl_loi_list_lock); + cl_env_put(env, &refcheck); + + RETURN(rc); +} + static int osc_process_config(struct obd_device *obd, obd_count len, void *buf) { return osc_process_config_base(obd, buf); @@ -4481,6 +4693,7 @@ struct obd_ops osc_obd_ops = { .o_llog_init = osc_llog_init, .o_llog_finish = osc_llog_finish, .o_process_config = osc_process_config, + .o_sync_fs = osc_sync_fs, }; extern struct lu_kmem_descr osc_caches[];