#define DEBUG_SUBSYSTEM S_OSC
+#include <linux/workqueue.h>
#include <lprocfs_status.h>
#include <lustre_debug.h>
#include <lustre_dlm.h>
}
}
+/**
+ * grant thread data for shrinking space.
+ *
+ * Shared state for the periodic grant-shrink worker: the set of
+ * registered clients and a stop flag checked before rescheduling.
+ */
+struct grant_thread_data {
+ /* client_obd's linked through cl_grant_chain; walked by the worker */
+ struct list_head gtd_clients;
+ /* protects gtd_clients against concurrent add/del and the list walk */
+ struct mutex gtd_mutex;
+ /* set to 1 to prevent the work item from rescheduling itself */
+ unsigned long gtd_stopped:1;
+};
+/* single module-wide instance shared by all OSC devices */
+static struct grant_thread_data client_gtd;
+
+/*
+ * RPC interpret callback for a grant-shrink request: on failure the grant
+ * we tried to return is restored to the client, on success the server's
+ * reply updates our grant accounting.
+ */
static int osc_shrink_grant_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *aa, int rc)
+ struct ptlrpc_request *req,
+ void *aa, int rc)
{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
- struct ost_body *body;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
+ struct ost_body *body;
- if (rc != 0) {
- __osc_update_grant(cli, oa->o_grant);
- GOTO(out, rc);
- }
+ if (rc != 0) {
+ /* shrink RPC failed: give the grant back to our local pool */
+ __osc_update_grant(cli, oa->o_grant);
+ GOTO(out, rc);
+ }
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- osc_update_grant(cli, body);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ osc_update_grant(cli, body);
out:
- OBDO_FREE(oa);
- return rc;
+ /* obdo is now allocated from the osc_obdo_kmem slab cache */
+ OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
+ return rc;
}
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
osc_announce_cached(cli, &body->oa, 0);
spin_lock(&cli->cl_loi_list_lock);
+ if (target_bytes >= cli->cl_avail_grant) {
+ /* available grant has changed since target calculation */
+ spin_unlock(&cli->cl_loi_list_lock);
+ GOTO(out_free, rc = 0);
+ }
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
spin_unlock(&cli->cl_loi_list_lock);
sizeof(*body), body, NULL);
if (rc != 0)
__osc_update_grant(cli, body->oa.o_grant);
+out_free:
OBD_FREE_PTR(body);
RETURN(rc);
}
{
time64_t next_shrink = client->cl_next_shrink_grant;
+ if (client->cl_import == NULL)
+ return 0;
+
if ((client->cl_import->imp_connect_data.ocd_connect_flags &
OBD_CONNECT_GRANT_SHRINK) == 0)
return 0;
return 0;
}
-static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+/* Max number of grant-shrink RPCs issued per work-handler pass */
+#define GRANT_SHRINK_RPC_BATCH 100
+
+static struct delayed_work work;
+
+/*
+ * Periodic worker: walk the registered clients, send grant-shrink RPCs
+ * (capped at GRANT_SHRINK_RPC_BATCH per pass), then reschedule itself
+ * for the earliest future cl_next_shrink_grant deadline.
+ */
+static void osc_grant_work_handler(struct work_struct *data)
{
- struct client_obd *client;
+ struct client_obd *cli;
+ int rpc_sent;
+ bool init_next_shrink = true;
+ time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;
- list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
- if (osc_should_shrink_grant(client))
- osc_shrink_grant(client);
+ rpc_sent = 0;
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_for_each_entry(cli, &client_gtd.gtd_clients,
+ cl_grant_chain) {
+ /* Count only RPCs actually sent so the batch limit caps
+ * shrink RPC traffic rather than the number of clients
+ * scanned (pre-incrementing rpc_sent for every client
+ * would skip clients past the 100th even when no RPC
+ * was issued). */
+ if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
+ osc_should_shrink_grant(cli)) {
+ osc_shrink_grant(cli);
+ rpc_sent++;
+ }
+
+ /* track the earliest future shrink deadline for reschedule */
+ if (!init_next_shrink) {
+ if (cli->cl_next_shrink_grant < next_shrink &&
+ cli->cl_next_shrink_grant > ktime_get_seconds())
+ next_shrink = cli->cl_next_shrink_grant;
+ } else {
+ init_next_shrink = false;
+ next_shrink = cli->cl_next_shrink_grant;
+ }
	}
- return 0;
+ mutex_unlock(&client_gtd.gtd_mutex);
+
+ /* module is shutting down: do not requeue ourselves */
+ if (client_gtd.gtd_stopped == 1)
+ return;
+
+ if (next_shrink > ktime_get_seconds())
+ schedule_delayed_work(&work, msecs_to_jiffies(
+ (next_shrink - ktime_get_seconds()) *
+ MSEC_PER_SEC));
+ else
+ schedule_work(&work.work);
}
-static int osc_add_shrink_grant(struct client_obd *client)
+/**
+ * Start grant thread for returning grant to server for idle clients.
+ */
+static int osc_start_grant_work(void)
{
- int rc;
+ client_gtd.gtd_stopped = 0;
+ mutex_init(&client_gtd.gtd_mutex);
+ INIT_LIST_HEAD(&client_gtd.gtd_clients);
+
+ INIT_DELAYED_WORK(&work, osc_grant_work_handler);
+ /* kick the handler immediately; it reschedules itself afterwards */
+ schedule_work(&work.work);
- rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
- TIMEOUT_GRANT,
- osc_grant_shrink_grant_cb, NULL,
- &client->cl_grant_shrink_list);
- if (rc) {
- CERROR("add grant client %s error %d\n", cli_name(client), rc);
- return rc;
- }
- CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
- osc_update_next_shrink(client);
	return 0;
}
-static int osc_del_shrink_grant(struct client_obd *client)
+/* Flag the grant work stopped, then wait for any in-flight/queued run */
+static void osc_stop_grant_work(void)
+{
+ client_gtd.gtd_stopped = 1;
+ cancel_delayed_work_sync(&work);
+}
+
+/* Register a client with the global grant-shrink worker list */
+static void osc_add_grant_list(struct client_obd *client)
+{
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
+ mutex_unlock(&client_gtd.gtd_mutex);
+}
+
+/* Unlink a client from the grant-shrink worker list, if present */
+static void osc_del_grant_list(struct client_obd *client)
{
- return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
- TIMEOUT_GRANT);
+ /* NOTE(review): list_empty is checked outside gtd_mutex; this assumes
+ * no concurrent osc_add_grant_list for the same client at disconnect
+ * time -- confirm against the connect/disconnect ordering (BUG18662) */
+ if (list_empty(&client->cl_grant_chain))
+ return;
+
+ mutex_lock(&client_gtd.gtd_mutex);
+ list_del_init(&client->cl_grant_chain);
+ mutex_unlock(&client_gtd.gtd_mutex);
}
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
cli->cl_max_extent_pages);
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
- osc_add_shrink_grant(cli);
+ if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
+ osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
* safe to combine */
if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
- "report this at https://jira.hpdd.intel.com/\n",
+ "report this at https://jira.whamcloud.com/\n",
p1->flag, p2->flag);
}
return 0;
int sector_size,
u32 *check_sum)
{
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
/* Used Adler as the default checksum type on top of DIF tags */
unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
struct page *__page;
if (__page == NULL)
return -ENOMEM;
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
- rc = PTR_ERR(hdesc);
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
+ rc = PTR_ERR(req);
CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
obd_name, cfs_crypto_hash_name(cfs_alg), rc);
GOTO(out, rc);
used_number += used;
if (used_number == guard_number) {
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
used_number = 0;
}
GOTO(out, rc);
if (used_number != 0)
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
bufsize = sizeof(cksum);
- cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
u32 *cksum)
{
int i = 0;
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
LASSERT(pg_count > 0);
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("Unable to initialize checksum hash %s\n",
cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
while (nob > 0 && pg_count > 0) {
memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
kunmap(pga[i]->pg);
}
- cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
+ cfs_crypto_hash_update_page(req, pga[i]->pg,
pga[i]->off & ~PAGE_MASK,
count);
LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
}
bufsize = sizeof(*cksum);
- cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
+ cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
for (i = 0; i < page_count; i++)
short_io_size += pga[i]->count;
- /* Check if we can do a short io. */
- if (!(short_io_size <= cli->cl_short_io_bytes && niocount == 1 &&
- imp_connect_shortio(cli->cl_import)))
+ /* Check if read/write is small enough to be a short io. */
+ if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
+ !imp_connect_shortio(cli->cl_import))
short_io_size = 0;
req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
- OBDO_FREE(aa->aa_oa);
+ OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
osc_inc_unstable_pages(req);
if (pga == NULL)
GOTO(out, rc = -ENOMEM);
- OBDO_ALLOC(oa);
+ OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
if (oa == NULL)
GOTO(out, rc = -ENOMEM);
LASSERT(req == NULL);
if (oa)
- OBDO_FREE(oa);
+ OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
if (pga)
OBD_FREE(pga, sizeof(*pga) * page_count);
/* this should happen rarely and is pretty bad, it makes the
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- OBDO_ALLOC(oa);
+ OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
if (!oa) {
ptlrpc_req_finished(req);
RETURN(-ENOMEM);
struct obd_device *obd = class_exp2obd(exp);
int rc;
- rc = client_disconnect_export(exp);
- /**
- * Initially we put del_shrink_grant before disconnect_export, but it
- * causes the following problem if setup (connect) and cleanup
- * (disconnect) are tangled together.
- * connect p1 disconnect p2
- * ptlrpc_connect_import
- * ............... class_manual_cleanup
- * osc_disconnect
- * del_shrink_grant
- * ptlrpc_connect_interrupt
- * init_grant_shrink
- * add this client to shrink list
- * cleanup_osc
- * Bang! pinger trigger the shrink.
- * So the osc should be disconnected from the shrink list, after we
- * are sure the import has been destroyed. BUG18662
- */
- if (obd->u.cli.cl_import == NULL)
- osc_del_shrink_grant(&obd->u.cli);
- return rc;
+ rc = client_disconnect_export(exp);
+ /**
+ * Initially we put del_shrink_grant before disconnect_export, but it
+ * causes the following problem if setup (connect) and cleanup
+ * (disconnect) are tangled together.
+ * connect p1 disconnect p2
+ * ptlrpc_connect_import
+ * ............... class_manual_cleanup
+ * osc_disconnect
+ * del_shrink_grant
+ * ptlrpc_connect_interrupt
+ * osc_init_grant
+ * add this client to shrink list
+ * cleanup_osc
+ * Bang! grant shrink thread trigger the shrink. BUG18662
+ */
+ osc_del_grant_list(&obd->u.cli);
+ return rc;
}
EXPORT_SYMBOL(osc_disconnect);
GOTO(out_ptlrpcd_work, rc);
cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
+ osc_update_next_shrink(cli);
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
RETURN(rc);
out_ptlrpcd_work:
atomic_add(added, &osc_pool_req_count);
}
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
spin_lock(&osc_shrink_lock);
list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
spin_unlock(&osc_shrink_lock);
cli->cl_import->imp_idle_timeout = osc_idle_timeout;
+ cli->cl_import->imp_idle_debug = D_HA;
RETURN(0);
}
osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
ptlrpc_add_rqs_to_pool);
- if (osc_rq_pool != NULL)
- GOTO(out, rc);
- rc = -ENOMEM;
+ if (osc_rq_pool == NULL)
+ GOTO(out_type, rc = -ENOMEM);
+
+ rc = osc_start_grant_work();
+ if (rc != 0)
+ GOTO(out_req_pool, rc);
+
+ RETURN(rc);
+
+out_req_pool:
+ ptlrpc_free_rq_pool(osc_rq_pool);
out_type:
class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
lu_kmem_fini(osc_caches);
-out:
+
RETURN(rc);
}
static void __exit osc_exit(void)
{
+ osc_stop_grant_work();
remove_shrinker(osc_cache_shrinker);
class_unregister_type(LUSTRE_OSC_NAME);
lu_kmem_fini(osc_caches);