- __u64 xid = 0;
-
- ENTRY;
- if (oap->oap_request != NULL) {
- xid = ptlrpc_req_xid(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
-
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags = 0;
- cfs_spin_unlock(&oap->oap_lock);
- oap->oap_interrupted = 0;
-
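- /* For writes, fold this RPC's xid and result into the client-wide
- * and per-object async-rc state; a failure recorded there can set
- * ar_force_sync and push later writes to sync I/O (see the
- * ar_force_sync checks in osc_enter_cache()). */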
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- osc_process_ar(&cli->cl_ar, xid, rc);
- osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
- }
-
- if (rc == 0 && oa != NULL) {
- if (oa->o_valid & OBD_MD_FLBLOCKS)
- oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
- if (oa->o_valid & OBD_MD_FLMTIME)
- oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
- if (oa->o_valid & OBD_MD_FLATIME)
- oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
- if (oa->o_valid & OBD_MD_FLCTIME)
- oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
- }
-
- rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
- oap->oap_cmd, oa, rc);
-
- /* ll_ap_completion (from llite) drops PG_locked, so a new
- * I/O on the page could start; but OSC calls it under lock
- * and thus we can safely add the oap back to pending */
- if (rc)
- /* upper layer wants to leave the page on pending queue */
- osc_oap_to_pending(oap);
- else
- osc_exit_cache(cli, oap, sent);
- EXIT;
-}
-
-static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
-{
- struct osc_brw_async_args *aa = data;
- struct client_obd *cli;
- int async;
- ENTRY;
-
- rc = osc_brw_fini_request(req, rc);
- CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
- if (osc_recoverable_error(rc)) {
- /* Only retry once for mmapped files, since an mmapped page
- * might be modified at any time. We have to retry at least
- * once in case there really was corruption of the page on
- * the network that was not caused by mmap() modifying the
- * page. Bug 11742 */
- if ((rc == -EAGAIN) && (aa->aa_resends > 0) &&
- aa->aa_oa->o_valid & OBD_MD_FLFLAGS &&
- aa->aa_oa->o_flags & OBD_FL_MMAP) {
- rc = 0;
- } else {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
- }
- }
-
- if (aa->aa_ocapa) {
- capa_put(aa->aa_ocapa);
- aa->aa_ocapa = NULL;
- }
-
- cli = aa->aa_cli;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
- * is called so we know whether to go to sync BRWs or wait for more
- * RPCs to complete */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
- cli->cl_w_in_flight--;
- else
- cli->cl_r_in_flight--;
-
- async = cfs_list_empty(&aa->aa_oaps);
- if (!async) { /* from osc_send_oap_rpc() */
- struct osc_async_page *oap, *tmp;
- /* the caller may re-use the oap after the completion call so
- * we need to clean it up a little */
- cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
- oap_rpc_item) {
- cfs_list_del_init(&oap->oap_rpc_item);
- osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
- }
- OBDO_FREE(aa->aa_oa);
- } else { /* from async_internal() */
- obd_count i;
- for (i = 0; i < aa->aa_page_count; i++)
- osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
- }
- osc_wake_cache_waiters(cli);
- osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- if (!async)
- cl_req_completion(env, aa->aa_clerq, rc);
- osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
-
- RETURN(rc);
-}
-
-static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
- struct client_obd *cli,
- cfs_list_t *rpc_list,
- int page_count, int cmd)
-{
- struct ptlrpc_request *req;
- struct brw_page **pga = NULL;
- struct osc_brw_async_args *aa;
- struct obdo *oa = NULL;
- const struct obd_async_page_ops *ops = NULL;
- void *caller_data = NULL;
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- struct ost_body *body;
- struct cl_req *clerq = NULL;
- enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
- struct ldlm_lock *lock = NULL;
- struct cl_req_attr crattr;
- int i, rc, mpflag = 0;
-
- ENTRY;
- LASSERT(!cfs_list_empty(rpc_list));
-
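- /* Remember the current memory-pressure state and set it for this
- * task while the request is built; it is restored at out: below. */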
- if (cmd & OBD_BRW_MEMALLOC)
- mpflag = cfs_memory_pressure_get_and_set();
-
- memset(&crattr, 0, sizeof crattr);
- OBD_ALLOC(pga, sizeof(*pga) * page_count);
- if (pga == NULL)
- GOTO(out, req = ERR_PTR(-ENOMEM));
-
- OBDO_ALLOC(oa);
- if (oa == NULL)
- GOTO(out, req = ERR_PTR(-ENOMEM));
-
- i = 0;
- cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
- struct cl_page *page = osc_oap2cl_page(oap);
- if (ops == NULL) {
- ops = oap->oap_caller_ops;
- caller_data = oap->oap_caller_data;
-
- clerq = cl_req_alloc(env, page, crt,
- 1 /* only 1-object rpcs for
- * now */);
- if (IS_ERR(clerq))
- GOTO(out, req = (void *)clerq);
- lock = oap->oap_ldlm_lock;
- }
- pga[i] = &oap->oap_brw_page;
- pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
- CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
- i++;
- cl_req_page_add(env, clerq, page);
- }
-
- /* always get the data for the obdo for the rpc */
- LASSERT(ops != NULL);
- crattr.cra_oa = oa;
- crattr.cra_capa = NULL;
- cl_req_attr_set(env, clerq, &crattr, ~0ULL);
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- }
-
- rc = cl_req_prep(env, clerq);
- if (rc != 0) {
- CERROR("cl_req_prep failed: %d\n", rc);
- GOTO(out, req = ERR_PTR(rc));
- }
-
- sort_brw_pages(pga, page_count);
- rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, crattr.cra_capa, 1, 0);
- if (rc != 0) {
- CERROR("prep_req failed: %d\n", rc);
- GOTO(out, req = ERR_PTR(rc));
- }
-
- if (cmd & OBD_BRW_MEMALLOC)
- req->rq_memalloc = 1;
-
- /* Need to update the timestamps after the request is built in case
- * we race with setattr (locally or in queue at the OST). If the OST
- * gets a later setattr before an earlier BRW (as determined by the
- * request xid), the OST will not use the BRW timestamps. Sadly, there
- * is no obvious way to do this in a single call. Bug 10150 */
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- cl_req_attr_set(env, clerq, &crattr,
- OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- CFS_INIT_LIST_HEAD(&aa->aa_oaps);
- cfs_list_splice(rpc_list, &aa->aa_oaps);
- CFS_INIT_LIST_HEAD(rpc_list);
- aa->aa_clerq = clerq;
-out:
- if (cmd & OBD_BRW_MEMALLOC)
- cfs_memory_pressure_restore(mpflag);
-
- capa_put(crattr.cra_capa);
- if (IS_ERR(req)) {
- if (oa)
- OBDO_FREE(oa);
- if (pga)
- OBD_FREE(pga, sizeof(*pga) * page_count);
- /* this should happen rarely and is pretty bad; it makes the
- * pending list not follow the dirty order */
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
- cfs_list_del_init(&oap->oap_rpc_item);
-
- /* queued sync pages can be torn down while they are
- * between the pending list and the rpc */
- if (oap->oap_interrupted) {
- CDEBUG(D_INODE, "oap %p interrupted\n", oap);
- osc_ap_completion(env, cli, NULL, oap, 0,
- oap->oap_count);
- continue;
- }
- osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
- }
- if (clerq && !IS_ERR(clerq))
- cl_req_completion(env, clerq, PTR_ERR(req));
- }
- RETURN(req);
-}
-
-/**
- * Prepare pages for ASYNC I/O and put them in the send queue.
- *
- * \param cmd OBD_BRW_* macros
- * \param lop pending pages
- *
- * \return zero if no pages were added to the send queue.
- * \return 1 if pages were successfully added to the send queue.
- * \return negative on error.
- */
-static int
-osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
- struct lov_oinfo *loi,
- int cmd, struct loi_oap_pages *lop)
-{
- struct ptlrpc_request *req;
- obd_count page_count = 0;
- struct osc_async_page *oap = NULL, *tmp;
- struct osc_brw_async_args *aa;
- const struct obd_async_page_ops *ops;
- CFS_LIST_HEAD(rpc_list);
- int srvlock = 0, mem_tight = 0;
- struct cl_object *clob = NULL;
- obd_off starting_offset = OBD_OBJECT_EOF;
- unsigned int ending_offset;
- int starting_page_off = 0;
- ENTRY;
-
- /* ASYNC_HP pages first. At present, when a lock covering the pages
- * is to be canceled, the pages it covers will be sent out with
- * ASYNC_HP. We have to send them out as soon as possible. */
- cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
- if (oap->oap_async_flags & ASYNC_HP)
- cfs_list_move(&oap->oap_pending_item, &lop->lop_pending);
- if (++page_count >= cli->cl_max_pages_per_rpc)
- break;
- }
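- /* page_count above only bounded the ASYNC_HP scan; reset it before
- * collecting the actual pages for this RPC below. */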
- page_count = 0;
-
- /* first we find the pages we're allowed to work with */
- cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
- oap_pending_item) {
- ops = oap->oap_caller_ops;
-
- LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
- "magic 0x%x\n", oap, oap->oap_magic);
-
- if (clob == NULL) {
- /* pin object in memory, so that completion call-backs
- * can be safely called under client_obd_list lock. */
- clob = osc_oap2cl_page(oap)->cp_obj;
- cl_object_get(clob);
- }
-
- if (page_count != 0 &&
- srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
- CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
- " oap %p, page %p, srvlock %u\n",
- oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
- break;
- }
-
- /* If there is a gap at the start of this page, it can't merge
- * with any previous page, so we'll hand the network a
- * "fragmented" page array that it can't transfer in 1 RDMA */
- if (oap->oap_obj_off < starting_offset) {
- if (starting_page_off != 0)
- break;
-
- starting_page_off = oap->oap_page_off;
- starting_offset = oap->oap_obj_off + starting_page_off;
- } else if (oap->oap_page_off != 0)
- break;
-
- /* In llite, being 'ready' equates to the page being locked
- * until completion unlocks it. commit_write submits a page
- * as not ready because its unlock will happen unconditionally
- * as the call returns. If we race with commit_write giving
- * us that page, we don't want to create a hole in the page
- * stream, so we stop and leave the rpc to be fired by
- * another dirtier or kupdated interval (the not-ready page
- * will still be on the dirty list). We could call in
- * at the end of ll_file_write to process the queue again. */
- if (!(oap->oap_async_flags & ASYNC_READY)) {
- int rc = ops->ap_make_ready(env, oap->oap_caller_data,
- cmd);
- if (rc < 0)
- CDEBUG(D_INODE, "oap %p page %p returned %d "
- "instead of ready\n", oap,
- oap->oap_page, rc);
- switch (rc) {
- case -EAGAIN:
- /* llite is telling us that the page is still
- * in commit_write and that we should try
- * to put it in an rpc again later. We
- * break out of the loop so we don't create
- * a hole in the sequence of pages in the rpc
- * stream. */
- oap = NULL;
- break;
- case -EINTR:
- /* the I/O isn't needed; tell the checks
- * below to complete the rpc with EINTR */
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- cfs_spin_unlock(&oap->oap_lock);
- oap->oap_count = -EINTR;
- break;
- case 0:
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY;
- cfs_spin_unlock(&oap->oap_lock);
- break;
- default:
- LASSERTF(0, "oap %p page %p returned %d "
- "from make_ready\n", oap,
- oap->oap_page, rc);
- break;
- }
- }
- if (oap == NULL)
- break;
- /*
- * Page submitted for IO has to be locked. Either by
- * ->ap_make_ready() or by higher layers.
- */
-#if defined(__KERNEL__) && defined(__linux__)
- {
- struct cl_page *page;
-
- page = osc_oap2cl_page(oap);
-
- if (page->cp_type == CPT_CACHEABLE &&
- !(PageLocked(oap->oap_page) &&
- (CheckWriteback(oap->oap_page, cmd)))) {
- CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n",
- oap->oap_page,
- (long)oap->oap_page->flags,
- oap->oap_async_flags);
- LBUG();
- }
- }
-#endif
-
- /* take the page out of our book-keeping */
- cfs_list_del_init(&oap->oap_pending_item);
- lop_update_pending(cli, lop, cmd, -1);
- cfs_list_del_init(&oap->oap_urgent_item);
-
- /* ask the caller for the size of the io as the rpc leaves. */
- if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count =
- ops->ap_refresh_count(env, oap->oap_caller_data,
- cmd);
- LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
- }
- if (oap->oap_count <= 0) {
- CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
- oap->oap_count);
- osc_ap_completion(env, cli, NULL,
- oap, 0, oap->oap_count);
- continue;
- }
-
- /* now put the page back in our accounting */
- cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (page_count++ == 0)
- srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
-
- if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
- mem_tight = 1;
-
- /* End on a PTLRPC_MAX_BRW_SIZE boundary. We want full-sized
- * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
- * have the same alignment as the initial writes that allocated
- * extents on the server. */
- ending_offset = oap->oap_obj_off + oap->oap_page_off +
- oap->oap_count;
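- /* (offset & (size - 1)) == 0 tests alignment; this relies on
- * PTLRPC_MAX_BRW_SIZE being a power of two. */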
- if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
- break;
-
- if (page_count >= cli->cl_max_pages_per_rpc)
- break;
-
- /* If there is a gap at the end of this page, it can't merge
- * with any subsequent pages, so we'll hand the network a
- * "fragmented" page array that it can't transfer in 1 RDMA */
- if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
- break;
- }
-
- osc_wake_cache_waiters(cli);
-
- loi_list_maint(cli, loi);
-
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- if (clob != NULL)
- cl_object_put(env, clob);
-
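- /* the caller expects cl_loi_list_lock to be held on return, so
- * re-take it before returning with nothing to send */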
- if (page_count == 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
- RETURN(0);
- }
-
- req = osc_build_req(env, cli, &rpc_list, page_count,
- mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
- if (IS_ERR(req)) {
- LASSERT(cfs_list_empty(&rpc_list));
- loi_list_maint(cli, loi);
- RETURN(PTR_ERR(req));
- }
-
- aa = ptlrpc_req_async_args(req);
-
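- /* Keep only the offset within a PTLRPC_MAX_BRW_SIZE window; the
- * lproc offset histograms below record how far each RPC's start is
- * from that alignment boundary. */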
- starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
- if (cmd == OBD_BRW_READ) {
- lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
- (starting_offset >> CFS_PAGE_SHIFT) + 1);
- } else {
- lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_write_rpc_hist,
- cli->cl_w_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
- (starting_offset >> CFS_PAGE_SHIFT) + 1);
- }
- ptlrpc_lprocfs_brw(req, aa->aa_requested_nob);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- if (cmd == OBD_BRW_READ)
- cli->cl_r_in_flight++;
- else
- cli->cl_w_in_flight++;
-
- /* queued sync pages can be torn down while they are
- * between the pending list and the rpc */
- tmp = NULL;
- cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- /* only one oap gets a request reference */
- if (tmp == NULL)
- tmp = oap;
- if (oap->oap_interrupted && !req->rq_intr) {
- CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
- ptlrpc_mark_interrupted(req);
- }
- }
- if (tmp != NULL)
- tmp->oap_request = ptlrpc_request_addref(req);
-
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
- page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
-
- req->rq_interpret_reply = brw_interpret;
- ptlrpcd_add_req(req, PSCOPE_BRW);
- RETURN(1);
-}
-
-#define LOI_DEBUG(LOI, STR, args...) \
- CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
- !cfs_list_empty(&(LOI)->loi_ready_item) || \
- !cfs_list_empty(&(LOI)->loi_hp_ready_item), \
- (LOI)->loi_write_lop.lop_num_pending, \
- !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent), \
- (LOI)->loi_read_lop.lop_num_pending, \
- !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent), \
- args)
-
-/* This is called by osc_check_rpcs() to find which objects have pages that
- * we could be sending. These lists are maintained by lop_makes_rpc(). */
-struct lov_oinfo *osc_next_loi(struct client_obd *cli)
-{
- ENTRY;
-
- /* First return objects that have blocked locks so that they
- * will be flushed quickly and other clients can get the lock,
- * then objects which have pages ready to be stuffed into RPCs */
- if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
- RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
- struct lov_oinfo, loi_hp_ready_item));
- if (!cfs_list_empty(&cli->cl_loi_ready_list))
- RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
- struct lov_oinfo, loi_ready_item));
-
- /* then if we have cache waiters, return all objects with queued
- * writes. This is especially important when many small files
- * have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshold */
- if (!cfs_list_empty(&cli->cl_cache_waiters) &&
- !cfs_list_empty(&cli->cl_loi_write_list))
- RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
- struct lov_oinfo, loi_write_item));
-
- /* then return all queued objects when we have an invalid import
- * so that they get flushed */
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
- if (!cfs_list_empty(&cli->cl_loi_write_list))
- RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
- struct lov_oinfo,
- loi_write_item));
- if (!cfs_list_empty(&cli->cl_loi_read_list))
- RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
- struct lov_oinfo, loi_read_item));
- }
- RETURN(NULL);
-}
-
-static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
-{
- struct osc_async_page *oap;
- int hprpc = 0;
-
- if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
- oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
- struct osc_async_page, oap_urgent_item);
- hprpc = !!(oap->oap_async_flags & ASYNC_HP);
- }
-
- if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
- oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
- struct osc_async_page, oap_urgent_item);
- hprpc = !!(oap->oap_async_flags & ASYNC_HP);
- }
-
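- /* allow one extra RPC in flight beyond the normal limit when a
- * high-priority (ASYNC_HP) page is queued, so urgent pages are not
- * stuck behind cl_max_rpcs_in_flight */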
- return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
-}
-
-/* called with the loi list lock held */
-void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
-{
- struct lov_oinfo *loi;
- int rc = 0, race_counter = 0;
- ENTRY;
-
- while ((loi = osc_next_loi(cli)) != NULL) {
- LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
-
- if (osc_max_rpc_in_flight(cli, loi))
- break;
-
- /* attempt some read/write balancing by alternating between
- * reads and writes in an object. The makes_rpc checks here
- * would be redundant if we were getting read/write work items
- * instead of objects. We don't want send_oap_rpc to drain a
- * partial read pending queue when we're given this object to
- * do write I/O on while there are cache waiters */
- if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
- rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
- &loi->loi_write_lop);
- if (rc < 0) {
- CERROR("Write request failed with %d\n", rc);
-
- /* osc_send_oap_rpc failed, mostly because of
- * memory pressure.
- *
- * We can't break here, because if:
- * - a page was submitted by osc_io_submit, so
- * the page is locked;
- * - no request is in flight; and
- * - no subsequent request will be made,
- * then the system will be in a live-lock state,
- * because there is no chance to call
- * osc_io_unplug() and osc_check_rpcs() any
- * more. pdflush can't help in this case,
- * because it might be blocked grabbing
- * the page lock, as mentioned above.
- *
- * Anyway, continue to drain pages. */
- /* break; */
- }
-
- if (rc > 0)
- race_counter = 0;
- else if (rc == 0)
- race_counter++;
- }
- if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
- rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
- &loi->loi_read_lop);
- if (rc < 0)
- CERROR("Read request failed with %d\n", rc);
-
- if (rc > 0)
- race_counter = 0;
- else if (rc == 0)
- race_counter++;
- }
-
- /* attempt some inter-object balancing by issuing rpcs
- * for each object in turn */
- if (!cfs_list_empty(&loi->loi_hp_ready_item))
- cfs_list_del_init(&loi->loi_hp_ready_item);
- if (!cfs_list_empty(&loi->loi_ready_item))
- cfs_list_del_init(&loi->loi_ready_item);
- if (!cfs_list_empty(&loi->loi_write_item))
- cfs_list_del_init(&loi->loi_write_item);
- if (!cfs_list_empty(&loi->loi_read_item))
- cfs_list_del_init(&loi->loi_read_item);
-
- loi_list_maint(cli, loi);
-
- /* send_oap_rpc fails with 0 when make_ready tells it to
- * back off. llite's make_ready does this when it tries
- * to lock a page queued for write that is already locked.
- * we want to try sending rpcs from many objects, but we
- * don't want to spin failing with 0. */
- if (race_counter == 10)
- break;
- }
- EXIT;
-}
-
-/* we're trying to queue a page in the osc so we're subject to the
- * 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
- * If the osc's queued pages are already at that limit, then we want to sleep
- * until there is space in the osc's queue for us. We also may be waiting for
- * write credits from the OST if there are RPCs in flight that may return some
- * before we fall back to sync writes.
- *
- * We need this to know whether our allocation was granted in the presence of signals */
-static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
-{
- int rc;
- ENTRY;
- client_obd_list_lock(&cli->cl_loi_list_lock);
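- /* we were granted if osc_wake_cache_waiters() took us off the list;
- * and with no RPCs in flight there is no hope of more grant, so stop
- * waiting in that case too */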
- rc = cfs_list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
-}
-
-/**
- * Non-blocking version of osc_enter_cache() that consumes grant only when it
- * is available.
- */
-int osc_enter_cache_try(const struct lu_env *env,
- struct client_obd *cli, struct lov_oinfo *loi,
- struct osc_async_page *oap, int transient)
-{
- int has_grant;
-
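- /* 'grant' is space the OST has promised this client may consume;
- * one page's worth must be available before the page can be cached
- * dirty without risking the server running out of space. */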
- has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
- if (has_grant) {
- osc_consume_write_grant(cli, &oap->oap_brw_page);
- if (transient) {
- cli->cl_dirty_transit += CFS_PAGE_SIZE;
- cfs_atomic_inc(&obd_dirty_transit_pages);
- oap->oap_brw_flags |= OBD_BRW_NOCACHE;
- }
- }
- return has_grant;
-}
-
-/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
- * grant or cache space. */
-static int osc_enter_cache(const struct lu_env *env,
- struct client_obd *cli, struct lov_oinfo *loi,
- struct osc_async_page *oap)
-{
- struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- ENTRY;
-
- CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
- "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
- cli->cl_dirty_max, obd_max_dirty_pages,
- cli->cl_lost_grant, cli->cl_avail_grant);
-
- /* force the caller to try sync io. this can jump the list
- * of queued writes and create a discontiguous rpc stream */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < CFS_PAGE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
- RETURN(-EDQUOT);
-
- /* Hopefully normal case - cache space and write credits available */
- if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
- cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
- osc_enter_cache_try(env, cli, loi, oap, 0))
- RETURN(0);
-
- /* It is safe to block as a cache waiter as long as there is grant
- * space available or the hope of additional grant being returned
- * when an in-flight write completes. Using the writeback cache
- * if possible is preferable to sending the data synchronously,
- * because write pages can then be merged into large requests.
- * The addition of this cache waiter will cause pending write
- * pages to be sent immediately. */
- if (cli->cl_w_in_flight || cli->cl_avail_grant >= CFS_PAGE_SIZE) {
- cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
- cfs_waitq_init(&ocw.ocw_waitq);
- ocw.ocw_oap = oap;
- ocw.ocw_rc = 0;
-
- loi_list_maint(cli, loi);
- osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_CACHE, "sleeping for cache space\n");
- l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (!cfs_list_empty(&ocw.ocw_entry)) {
- cfs_list_del(&ocw.ocw_entry);
- RETURN(-EINTR);
- }
- RETURN(ocw.ocw_rc);
- }
-
- RETURN(-EDQUOT);
-}
-
-
-int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, cfs_page_t *page,
- obd_off offset, const struct obd_async_page_ops *ops,
- void *data, void **res, int nocache,
- struct lustre_handle *lockh)
-{
- struct osc_async_page *oap;
-
- ENTRY;
-
- if (!page)
- return cfs_size_round(sizeof(*oap));
-
- oap = *res;
- oap->oap_magic = OAP_MAGIC;
- oap->oap_cli = &exp->exp_obd->u.cli;
- oap->oap_loi = loi;
-
- oap->oap_caller_ops = ops;
- oap->oap_caller_data = data;
-
- oap->oap_page = page;
- oap->oap_obj_off = offset;
- if (!client_is_remote(exp) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags = OBD_BRW_NOQUOTA;
-
- LASSERT(!(offset & ~CFS_PAGE_MASK));
-
- CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
- CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
- CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
- CFS_INIT_LIST_HEAD(&oap->oap_page_list);
-
- cfs_spin_lock_init(&oap->oap_lock);
- CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
- RETURN(0);
-}
-
-int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
- struct lov_stripe_md *lsm, struct lov_oinfo *loi,
- struct osc_async_page *oap, int cmd, int off,
- int count, obd_flag brw_flags, enum async_flags async_flags)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc = 0;
- ENTRY;
-
- if (oap->oap_magic != OAP_MAGIC)
- RETURN(-EINVAL);
-
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- RETURN(-EIO);
-
- if (!cfs_list_empty(&oap->oap_pending_item) ||
- !cfs_list_empty(&oap->oap_urgent_item) ||
- !cfs_list_empty(&oap->oap_rpc_item))
- RETURN(-EBUSY);
-
- /* check if the file's owner/group is over quota */
- if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
- struct cl_object *obj;
- struct cl_attr attr; /* XXX put attr into thread info */
- unsigned int qid[MAXQUOTAS];
-
- obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
-
- cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, &attr);
- cl_object_attr_unlock(obj);
-
- qid[USRQUOTA] = attr.cat_uid;
- qid[GRPQUOTA] = attr.cat_gid;
- if (rc == 0 &&
- lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA)
- rc = -EDQUOT;
- if (rc)
- RETURN(rc);
- }
-
- if (loi == NULL)
- loi = lsm->lsm_oinfo[0];
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- LASSERT(off + count <= CFS_PAGE_SIZE);
- oap->oap_cmd = cmd;
- oap->oap_page_off = off;
- oap->oap_count = count;
- oap->oap_brw_flags = brw_flags;
- /* Give a hint to the OST that requests are coming from kswapd - bug 19529 */
- if (cfs_memory_pressure_get())
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags = async_flags;
- cfs_spin_unlock(&oap->oap_lock);
-
- if (cmd & OBD_BRW_WRITE) {
- rc = osc_enter_cache(env, cli, loi, oap);
- if (rc) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
- }
- }
-
- osc_oap_to_pending(oap);
- loi_list_maint(cli, loi);
-
- LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
- cmd);
-
- osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- RETURN(0);
-}
-
-/* aka (~was & now & flag), but this is more clear :) */
-#define SETTING(was, now, flag) (!(was & flag) && (now & flag))
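-/* e.g. SETTING(0, ASYNC_READY, ASYNC_READY) is true (the flag is newly
- * set), while SETTING(ASYNC_READY, ASYNC_READY, ASYNC_READY) is false. */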
-
-int osc_set_async_flags_base(struct client_obd *cli,
- struct lov_oinfo *loi, struct osc_async_page *oap,
- obd_flag async_flags)
-{
- struct loi_oap_pages *lop;
- int flags = 0;
- ENTRY;
-
- LASSERT(!cfs_list_empty(&oap->oap_pending_item));
-
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- lop = &loi->loi_write_lop;
- } else {
- lop = &loi->loi_read_lop;
- }
-
- if ((oap->oap_async_flags & async_flags) == async_flags)
- RETURN(0);
-
- if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
- flags |= ASYNC_READY;
-
- if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
- cfs_list_empty(&oap->oap_rpc_item)) {
- if (oap->oap_async_flags & ASYNC_HP)
- cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
- else
- cfs_list_add_tail(&oap->oap_urgent_item,
- &lop->lop_urgent);
- flags |= ASYNC_URGENT;
- loi_list_maint(cli, loi);
- }
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= flags;
- cfs_spin_unlock(&oap->oap_lock);
-
- LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
- oap->oap_async_flags);
- RETURN(0);
-}
-
-int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, struct osc_async_page *oap)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct loi_oap_pages *lop;
- int rc = 0;
- ENTRY;
-
- if (oap->oap_magic != OAP_MAGIC)
- RETURN(-EINVAL);
-
- if (loi == NULL)
- loi = lsm->lsm_oinfo[0];
-
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- lop = &loi->loi_write_lop;
- } else {
- lop = &loi->loi_read_lop;
- }
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- if (!cfs_list_empty(&oap->oap_rpc_item))
- GOTO(out, rc = -EBUSY);
-
- osc_exit_cache(cli, oap, 0);
- osc_wake_cache_waiters(cli);
-
- if (!cfs_list_empty(&oap->oap_urgent_item)) {
- cfs_list_del_init(&oap->oap_urgent_item);
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
- cfs_spin_unlock(&oap->oap_lock);
- }
- if (!cfs_list_empty(&oap->oap_pending_item)) {
- cfs_list_del_init(&oap->oap_pending_item);
- lop_update_pending(cli, lop, oap->oap_cmd, -1);
- }
- loi_list_maint(cli, loi);
- LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
-out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
-}
-
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo)
-{
- void *data = einfo->ei_cbdata;
- int set = 0;
-
- LASSERT(lock != NULL);
- LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
- LASSERT(lock->l_resource->lr_type == einfo->ei_type);
- LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
- LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
-
- lock_res_and_lock(lock);
- cfs_spin_lock(&osc_ast_guard);
-
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
-
- cfs_spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(lock);
-
- return set;
-}
-
-static int osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int set = 0;