#define DEBUG_SUBSYSTEM S_OSC
+#include <linux/workqueue.h>
#include <lprocfs_status.h>
#include <lustre_debug.h>
#include <lustre_dlm.h>
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);
+/* Idle timeout copied to cl_import->imp_idle_timeout when an OSC device is
+ * set up; declared unsigned int to match the module_param() "uint" type
+ * (param_check_uint rejects a plain int with an incompatible-pointer
+ * diagnostic), consistent with osc_reqpool_mem_max above. */
+static unsigned int osc_idle_timeout = 20;
+module_param(osc_idle_timeout, uint, 0644);
+
+
#define osc_grant_args osc_brw_async_args
struct osc_setattr_args {
}
}
+/**
+ * Grant shrink work data: tracks every client_obd that negotiated
+ * OBD_CONNECT_GRANT_SHRINK so a single delayed work item can walk them
+ * all and return unused grant space to the servers.
+ */
+struct grant_thread_data {
+ struct list_head gtd_clients; /* clients linked via cl_grant_chain */
+ struct mutex gtd_mutex; /* protects gtd_clients add/del/walk */
+ unsigned long gtd_stopped:1; /* set when the work must not re-arm */
+};
+/* Single module-wide instance shared by all OSC devices. */
+static struct grant_thread_data client_gtd;
+
static int osc_shrink_grant_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void *aa, int rc)
{
time64_t next_shrink = client->cl_next_shrink_grant;
+ if (client->cl_import == NULL)
+ return 0;
+
if ((client->cl_import->imp_connect_data.ocd_connect_flags &
OBD_CONNECT_GRANT_SHRINK) == 0)
return 0;
return 0;
}
-static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+/* Per-pass cap used by osc_grant_work_handler() when walking gtd_clients. */
+#define GRANT_SHRINK_RPC_BATCH 100
+
+/* Module-wide delayed work item driving periodic grant shrinking.
+ * NOTE(review): the bare name "work" is very generic for a file-scope
+ * static; consider osc_grant_work in a follow-up. */
+static struct delayed_work work;
+
+/*
+ * Periodic work handler: walk all registered grant-shrink clients, send
+ * shrink RPCs for those that qualify (capped per pass), then re-arm the
+ * work for the earliest pending cl_next_shrink_grant deadline.
+ */
+static void osc_grant_work_handler(struct work_struct *data)
{
- struct client_obd *client;
+ struct client_obd *cli;
+ int rpc_sent;
+ bool init_next_shrink = true;
+ time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;
+
+ rpc_sent = 0;
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_for_each_entry(cli, &client_gtd.gtd_clients,
+ cl_grant_chain) {
+ /* Count only RPCs actually issued, so the batch cap limits
+ * shrink RPC traffic rather than merely the number of
+ * clients examined in one pass. */
+ if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
+ osc_should_shrink_grant(cli)) {
+ osc_shrink_grant(cli);
+ rpc_sent++;
+ }
- list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
- if (osc_should_shrink_grant(client))
- osc_shrink_grant(client);
+ /* Track the earliest future shrink deadline among clients. */
+ if (!init_next_shrink) {
+ if (cli->cl_next_shrink_grant < next_shrink &&
+ cli->cl_next_shrink_grant > ktime_get_seconds())
+ next_shrink = cli->cl_next_shrink_grant;
+ } else {
+ init_next_shrink = false;
+ next_shrink = cli->cl_next_shrink_grant;
+ }
}
- return 0;
+ mutex_unlock(&client_gtd.gtd_mutex);
+
+ /* NOTE(review): gtd_stopped is read without gtd_mutex; this relies on
+ * osc_stop_grant_work() following up with cancel_delayed_work_sync()
+ * to close the race with a concurrent re-arm — confirm. */
+ if (client_gtd.gtd_stopped == 1)
+ return;
+
+ if (next_shrink > ktime_get_seconds())
+ schedule_delayed_work(&work, msecs_to_jiffies(
+ (next_shrink - ktime_get_seconds()) *
+ MSEC_PER_SEC));
+ else
+ schedule_work(&work.work);
}
-static int osc_add_shrink_grant(struct client_obd *client)
+/**
+ * Start the grant work item that returns unused grant space to servers
+ * on behalf of idle clients.
+ *
+ * Always returns 0; the int return exists for the caller's error path.
+ */
+static int osc_start_grant_work(void)
{
- int rc;
+ client_gtd.gtd_stopped = 0;
+ mutex_init(&client_gtd.gtd_mutex);
+ INIT_LIST_HEAD(&client_gtd.gtd_clients);
+
+ INIT_DELAYED_WORK(&work, osc_grant_work_handler);
+ /* Kick off the first pass immediately; the handler re-arms itself. */
+ schedule_work(&work.work);
- rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
- TIMEOUT_GRANT,
- osc_grant_shrink_grant_cb, NULL,
- &client->cl_grant_shrink_list);
- if (rc) {
- CERROR("add grant client %s error %d\n", cli_name(client), rc);
- return rc;
- }
- CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
- osc_update_next_shrink(client);
return 0;
}
-static int osc_del_shrink_grant(struct client_obd *client)
+/* Stop the grant work item at module unload: flag the handler not to
+ * re-arm, then wait for any in-flight run to finish. */
+static void osc_stop_grant_work(void)
+{
+ client_gtd.gtd_stopped = 1;
+ cancel_delayed_work_sync(&work);
+}
+
+/* Register @client with the grant shrink work, under gtd_mutex. */
+static void osc_add_grant_list(struct client_obd *client)
{
- return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
- TIMEOUT_GRANT);
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
+ mutex_unlock(&client_gtd.gtd_mutex);
+}
+
+/* Unregister @client from the grant shrink work, if it was ever added. */
+static void osc_del_grant_list(struct client_obd *client)
+{
+ /* NOTE(review): cl_grant_chain emptiness is tested outside gtd_mutex;
+ * assumes add/del for a given client never race — confirm. */
+ if (!list_empty(&client->cl_grant_chain)) {
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_del_init(&client->cl_grant_chain);
+ mutex_unlock(&client_gtd.gtd_mutex);
+ }
+}
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
cli->cl_max_extent_pages);
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
- osc_add_shrink_grant(cli);
+ if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
+ osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
* safe to combine */
if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
- "report this at https://jira.hpdd.intel.com/\n",
+ "report this at https://jira.whamcloud.com/\n",
p1->flag, p2->flag);
}
return 0;
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
struct osc_async_args *aa;
- int rc;
+ int rc;
ENTRY;
/* We could possibly pass max_age in the request (as an absolute
ptlrpc_request_free(req);
RETURN(rc);
}
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
- if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
- /* procfs requests not want stat in wait for avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
+ if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
+ /* procfs requests not want stat in wait for avoid deadlock */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
/* Synchronous OST_STATFS: fetch obd_statfs from the server into *osfs.
 * NOTE(review): imp is initialized from obd->u.cli.cl_import in lines
 * elided from this hunk — this view only shows the NULL check. */
static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, time64_t max_age, __u32 flags)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct obd_statfs *msfs;
- struct ptlrpc_request *req;
- struct obd_import *imp = NULL;
- int rc;
- ENTRY;
+ struct obd_device *obd = class_exp2obd(exp);
+ struct obd_statfs *msfs;
+ struct ptlrpc_request *req;
+ struct obd_import *imp = NULL;
+ int rc;
+ ENTRY;
+
/*Since the request might also come from lprocfs, so we need
*sync this with client_disconnect_export Bug15684*/
if (!imp)
RETURN(-ENODEV);
- /* We could possibly pass max_age in the request (as an absolute
- * timestamp or a "seconds.usec ago") so the target can avoid doing
- * extra calls into the filesystem if that isn't necessary (e.g.
- * during mount that would help a bit). Having relative timestamps
- * is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+ /* We could possibly pass max_age in the request (as an absolute
+ * timestamp or a "seconds.usec ago") so the target can avoid doing
+ * extra calls into the filesystem if that isn't necessary (e.g.
+ * during mount that would help a bit). Having relative timestamps
+ * is not so great if request processing is slow, while absolute
+ * timestamps are not ideal because they need time synchronization. */
+ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
- class_import_put(imp);
+ class_import_put(imp);
- if (req == NULL)
- RETURN(-ENOMEM);
+ if (req == NULL)
+ RETURN(-ENOMEM);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
- if (rc) {
- ptlrpc_request_free(req);
- RETURN(rc);
- }
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
- if (flags & OBD_STATFS_NODELAY) {
- /* procfs requests not want stat in wait for avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
+ if (flags & OBD_STATFS_NODELAY) {
+ /* procfs requests must not wait (no resend/delay) or they
+ * could deadlock during recovery */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
- rc = ptlrpc_queue_wait(req);
- if (rc)
- GOTO(out, rc);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
- GOTO(out, rc = -EPROTO);
- }
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+ if (msfs == NULL)
+ GOTO(out, rc = -EPROTO);
- *osfs = *msfs;
+ *osfs = *msfs;
- EXIT;
- out:
- ptlrpc_req_finished(req);
- return rc;
+ EXIT;
+out:
+ ptlrpc_req_finished(req);
+ return rc;
}
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_device *obd = class_exp2obd(exp);
int rc;
- rc = client_disconnect_export(exp);
- /**
- * Initially we put del_shrink_grant before disconnect_export, but it
- * causes the following problem if setup (connect) and cleanup
- * (disconnect) are tangled together.
- * connect p1 disconnect p2
- * ptlrpc_connect_import
- * ............... class_manual_cleanup
- * osc_disconnect
- * del_shrink_grant
- * ptlrpc_connect_interrupt
- * init_grant_shrink
- * add this client to shrink list
- * cleanup_osc
- * Bang! pinger trigger the shrink.
- * So the osc should be disconnected from the shrink list, after we
- * are sure the import has been destroyed. BUG18662
- */
- if (obd->u.cli.cl_import == NULL)
- osc_del_shrink_grant(&obd->u.cli);
- return rc;
+ rc = client_disconnect_export(exp);
+ /**
+ * Initially we put del_shrink_grant before disconnect_export, but it
+ * causes the following problem if setup (connect) and cleanup
+ * (disconnect) are tangled together.
+ * connect p1 disconnect p2
+ * ptlrpc_connect_import
+ * ............... class_manual_cleanup
+ * osc_disconnect
+ * del_shrink_grant
+ * ptlrpc_connect_interrupt
+ * osc_init_grant
+ * add this client to shrink list
+ * cleanup_osc
+ * Bang! grant shrink thread trigger the shrink. BUG18662
+ */
+ osc_del_grant_list(&obd->u.cli);
+ return rc;
}
EXPORT_SYMBOL(osc_disconnect);
GOTO(out_ptlrpcd_work, rc);
cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
+ osc_update_next_shrink(cli);
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
RETURN(rc);
out_ptlrpcd_work:
atomic_add(added, &osc_pool_req_count);
}
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
spin_lock(&osc_shrink_lock);
list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
spin_unlock(&osc_shrink_lock);
+ cli->cl_import->imp_idle_timeout = osc_idle_timeout;
RETURN(0);
}
osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
ptlrpc_add_rqs_to_pool);
- if (osc_rq_pool != NULL)
- GOTO(out, rc);
- rc = -ENOMEM;
+ if (osc_rq_pool == NULL)
+ GOTO(out_type, rc = -ENOMEM);
+
+ rc = osc_start_grant_work();
+ if (rc != 0)
+ GOTO(out_req_pool, rc);
+
+ RETURN(rc);
+
+out_req_pool:
+ ptlrpc_free_rq_pool(osc_rq_pool);
out_type:
class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
lu_kmem_fini(osc_caches);
-out:
+
RETURN(rc);
}
static void __exit osc_exit(void)
{
+ /* Stop the grant shrink work first so its handler cannot run while
+ * the rest of the module (shrinker, caches) is being torn down. */
+ osc_stop_grant_work();
remove_shrinker(osc_cache_shrinker);
class_unregister_type(LUSTRE_OSC_NAME);
lu_kmem_fini(osc_caches);