* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
}
/**
- * How many pages osc_io_submit() queues before checking whether an RPC is
- * ready.
- */
-#define OSC_QUEUE_GRAIN (32)
-
-/**
* An implementation of cl_io_operations::cio_io_submit() method for osc
* layer. Iterates over pages in the in-queue, prepares each for io by calling
* cl_page_prep() and then either submits them through osc_io_submit_page()
LASSERT(qin->pl_nr > 0);
- CDEBUG(D_INFO, "%i %i\n", qin->pl_nr, crt);
+ CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, crt);
/*
* NOTE: here @page is a top-level page. This is done to avoid
* creation of sub-page-list.
OSC_FLAGS);
/*
* bug 18881: we can't just break out here when
- * error occurrs after cl_page_prep has been
+ * error occurs after cl_page_prep has been
* called against the page. The correct
* way is to call page's completion routine,
* as in osc_oap_interrupted. For simplicity,
*/
result = 0;
}
+
/*
- * Don't keep client_obd_list_lock() for too long.
- *
- * XXX client_obd_list lock has to be unlocked periodically to
- * avoid soft-lockups that tend to happen otherwise (see bug
- * 16651). On the other hand, osc_io_submit_page() queues a
- * page with ASYNC_URGENT flag and so all pages queued up
- * until this point are sent out immediately by
- * osc_io_unplug() resulting in sub-optimal RPCs (sub-optimal
- * RPCs only happen during `warm up' phase when less than
- * cl_max_rpcs_in_flight RPCs are in flight). To balance these
- * conflicting requirements, one might unplug once enough
- * pages to form a large RPC were queued (i.e., use
- * cli->cl_max_pages_per_rpc as OSC_QUEUE_GRAIN, see
- * lop_makes_rpc()), or ignore soft-lockup issue altogether.
+ * We might hold client_obd_list_lock() for too long and cause
+ * soft-lockups (see bug 16651). But on the other hand, pages
+ * are queued here with the ASYNC_URGENT flag, thus will be sent
+ * out immediately once osc_io_unplug() is called, possibly
+ * resulting in sub-optimal RPCs.
*
- * XXX lock_need_resched() should be used here, but it is not
- * available in the older of supported kernels.
+ * We think creating optimal-sized RPCs is more important than
+ * avoiding the transient soft-lockups, plus we believe the
+ * soft-lockups only happen during full debug testing.
*/
- if (queued > OSC_QUEUE_GRAIN || cfs_need_resched()) {
- queued = 0;
- osc_io_unplug(env, osc, cli);
- cfs_cond_resched();
- }
}
LASSERT(ergo(result == 0, cli != NULL));
if (queued > 0)
osc_io_unplug(env, osc, cli);
- CDEBUG(D_INFO, "%i/%i %i\n", qin->pl_nr, qout->pl_nr, result);
+ CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
return qout->pl_nr > 0 ? 0 : result;
}
io = ios->cis_io;
fio = &io->u.ci_fault;
- CDEBUG(D_INFO, "%lu %i %i\n",
+ CDEBUG(D_INFO, "%lu %d %d\n",
fio->ft_index, fio->ft_writable, fio->ft_nob);
/*
* If mapping is writeable, adjust kms to cover this page,
/*
* XXX Linux specific debugging stuff.
*/
- CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%i %lu\n",
+ CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%d %lu\n",
submitter->comm, submitter->pid, start);
libcfs_debug_dumpstack(submitter);
}
memset(oa, 0, sizeof(*oa));
if (result == 0) {
oa->o_id = loi->loi_id;
- oa->o_gr = loi->loi_gr;
+ oa->o_seq = loi->loi_seq;
oa->o_mtime = attr->cat_mtime;
oa->o_atime = attr->cat_atime;
oa->o_ctime = attr->cat_ctime;
/**
* Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_gr
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
* fields.
*/
static void osc_req_attr_set(const struct lu_env *env,
oa->o_valid |= OBD_MD_FLID;
}
if (flags & OBD_MD_FLGROUP) {
- oa->o_gr = oinfo->loi_gr;
+ oa->o_seq = oinfo->loi_seq;
oa->o_valid |= OBD_MD_FLGROUP;
}
if (flags & OBD_MD_FLHANDLE) {