locks under och_sem, because enqueue might want to decide to
cancel open locks for same inode we are holding och_sem for.
+Severity : normal
+Bugzilla : 13843
+Description: Client eviction while running blogbench
+Details : A lot of unlink operations with concurrent I/O can lead to a
+ deadlock causing evictions. To address the problem, the number of
+                 outstanding OST_DESTROY requests is now throttled to
+ max_rpcs_in_flight per OSC and LDLM_FL_DISCARD_DATA blocking
+ callbacks are processed in priority.
+
--------------------------------------------------------------------------------
2007-08-10 Cluster File Systems, Inc. <info@clusterfs.com>
struct obd_histogram cl_read_offset_hist;
struct obd_histogram cl_write_offset_hist;
+ /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
+ atomic_t cl_destroy_in_flight;
+ cfs_waitq_t cl_destroy_waitq;
+
struct mdc_rpc_lock *cl_rpc_lock;
struct mdc_rpc_lock *cl_setattr_lock;
struct mdc_rpc_lock *cl_close_lock;
RETURN(count);
}
+/*
+ * Completion callback for an asynchronous OST_DESTROY request.
+ *
+ * Invoked by ptlrpcd when the destroy RPC completes; rc is ignored
+ * because the client can do nothing about a destroy failure.  Releases
+ * one slot in the per-OSC destroy throttle and wakes one sender that
+ * may be blocked in l_wait_event_exclusive() on cl_destroy_waitq.
+ */
+static int osc_destroy_interpret(struct ptlrpc_request *req, void *data,
+                                 int rc)
+{
+        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+
+        /* One fewer OST_DESTROY in flight; let a waiting sender retry. */
+        atomic_dec(&cli->cl_destroy_in_flight);
+        cfs_waitq_signal(&cli->cl_destroy_waitq);
+        return 0;
+}
+
+/*
+ * Try to reserve a slot for one more in-flight OST_DESTROY RPC.
+ *
+ * Returns 1 when the caller may send: the counter was incremented and
+ * remained within cl_max_rpcs_in_flight.  Returns 0 when the limit is
+ * reached: the tentative increment is rolled back and the caller must
+ * wait on cl_destroy_waitq before retrying.
+ */
+static int osc_can_send_destroy(struct client_obd *cli)
+{
+        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
+            cli->cl_max_rpcs_in_flight) {
+                /* The destroy request can be sent */
+                return 1;
+        }
+        /*
+         * Over the limit: undo the tentative increment.  If a destroy
+         * completed between our two atomic operations, the rolled-back
+         * value is already below the limit, meaning a slot is free but
+         * the completion's wakeup may have been consumed by us; signal
+         * the waitq again so that wakeup is not lost.
+         */
+        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
+            cli->cl_max_rpcs_in_flight) {
+                /*
+                 * The counter has been modified between the two atomic
+                 * operations.
+                 */
+                cfs_waitq_signal(&cli->cl_destroy_waitq);
+        }
+        return 0;
+}
+
/* Destroy requests can be async always on the client, and we don't even really
* care about the return code since the client cannot do anything at all about
* a destroy failure.
struct ost_body *body;
int size[3] = { sizeof(struct ptlrpc_body), sizeof(*body), 0 };
int count, bufcount = 2;
+ struct client_obd *cli = &exp->exp_obd->u.cli;
ENTRY;
if (!oa) {
RETURN(-ENOMEM);
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ req->rq_interpret_reply = osc_destroy_interpret;
body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
ptlrpc_req_set_repsize(req, 2, size);
+ if (!osc_can_send_destroy(cli)) {
+ struct l_wait_info lwi = { 0 };
+
+                /*
+                 * Wait until the number of on-going destroy RPCs drops
+                 * under max_rpcs_in_flight
+                 */
+ l_wait_event_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli), &lwi);
+ }
+
+ /* Do not wait for response */
ptlrpcd_add_req(req);
RETURN(0);
}