#define DEBUG_SUBSYSTEM S_FILTER
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/dcache.h>
* at the OST layer there are only (potentially) multiple obd_device of type
* unknown at the time of OST thread creation.
*
- * Instead array of iobuf's is attached to struct filter_obd (->fo_iobuf_pool
- * field). This array has size OST_MAX_THREADS, so that each OST thread uses
- * it's very own iobuf.
+ * We create a cfs_hash for struct filter_obd (->fo_iobuf_hash field) on
+ * initialization; each OST thread will create its own iobuf on the first
+ * access and insert it into ->fo_iobuf_hash with thread ID as key,
+ * so the iobuf can be found again by thread ID.
*
* Functions below
*
- * filter_kiobuf_pool_init()
+ * filter_iobuf_pool_init()
*
- * filter_kiobuf_pool_done()
+ * filter_iobuf_pool_done()
*
* filter_iobuf_get()
*
*/
static void filter_iobuf_pool_done(struct filter_obd *filter)
{
- struct filter_iobuf **pool;
- int i;
-
- ENTRY;
+ ENTRY;
- pool = filter->fo_iobuf_pool;
- if (pool != NULL) {
- for (i = 0; i < filter->fo_iobuf_count; ++ i) {
- if (pool[i] != NULL)
- filter_free_iobuf(pool[i]);
- }
- OBD_FREE(pool, filter->fo_iobuf_count * sizeof pool[0]);
- filter->fo_iobuf_pool = NULL;
- }
- EXIT;
+ if (filter->fo_iobuf_hash != NULL) {
+ cfs_hash_putref(filter->fo_iobuf_hash);
+ filter->fo_iobuf_hash = NULL;
+ }
+ EXIT;
}
static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
return 0;
}
-/*
- * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
- */
-static int filter_iobuf_pool_init(struct filter_obd *filter)
+static unsigned
+filter_iobuf_hop_hash(cfs_hash_t *hs, const void *key, unsigned mask)
{
- void **pool;
+ __u64 val = *((__u64 *)key);
- ENTRY;
+ return cfs_hash_long(val, hs->hs_cur_bits);
+}
+static void *
+filter_iobuf_hop_key(cfs_hlist_node_t *hnode)
+{
+ struct filter_iobuf *pool;
- OBD_ALLOC_GFP(filter->fo_iobuf_pool, OSS_THREADS_MAX * sizeof(*pool),
- GFP_KERNEL);
- if (filter->fo_iobuf_pool == NULL)
- RETURN(-ENOMEM);
+ pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
+ return &pool->dr_hkey;
+}
- filter->fo_iobuf_count = OSS_THREADS_MAX;
+static int
+filter_iobuf_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+{
+ struct filter_iobuf *pool;
- RETURN(0);
+ pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
+ return pool->dr_hkey == *((__u64 *)key);
}
-/* Return iobuf allocated for @thread_id. We don't know in advance how
- * many threads there will be so we allocate a large empty array and only
- * fill in those slots that are actually in use.
- * If we haven't allocated a pool entry for this thread before, do so now. */
-void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
+static void *
+filter_iobuf_hop_object(cfs_hlist_node_t *hnode)
{
- int thread_id = (oti && oti->oti_thread) ?
- oti->oti_thread->t_id : -1;
- struct filter_iobuf *pool = NULL;
- struct filter_iobuf **pool_place = NULL;
+ return cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
+}
- if (thread_id >= 0) {
- LASSERT(thread_id < filter->fo_iobuf_count);
- pool = *(pool_place = &filter->fo_iobuf_pool[thread_id]);
- }
+static void
+filter_iobuf_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+ /* dummy, required by cfs_hash */
+}
- if (unlikely(pool == NULL)) {
- pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE,
- PTLRPC_MAX_BRW_PAGES);
- if (pool_place != NULL)
- *pool_place = pool;
- }
+static void
+filter_iobuf_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+ /* dummy, required by cfs_hash */
+}
+
+static void
+filter_iobuf_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+ struct filter_iobuf *pool;
+
+ pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
+ filter_free_iobuf(pool);
+}
+
+static struct cfs_hash_ops filter_iobuf_hops = {
+ .hs_hash = filter_iobuf_hop_hash,
+ .hs_key = filter_iobuf_hop_key,
+ .hs_keycmp = filter_iobuf_hop_keycmp,
+ .hs_object = filter_iobuf_hop_object,
+ .hs_get = filter_iobuf_hop_get,
+ .hs_put_locked = filter_iobuf_hop_put_locked,
+ .hs_exit = filter_iobuf_hop_exit
+};
+
+#define FILTER_IOBUF_HASH_BITS 9
+#define FILTER_IOBUF_HBKT_BITS 4
+
+/*
+ * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
+ */
+static int filter_iobuf_pool_init(struct filter_obd *filter)
+{
+ filter->fo_iobuf_hash = cfs_hash_create("filter_iobuf",
+ FILTER_IOBUF_HASH_BITS,
+ FILTER_IOBUF_HASH_BITS,
+ FILTER_IOBUF_HBKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &filter_iobuf_hops,
+ CFS_HASH_RW_BKTLOCK |
+ CFS_HASH_NO_ITEMREF);
+
+ return filter->fo_iobuf_hash != NULL ? 0 : -ENOMEM;
+}
- return pool;
+/* Return iobuf allocated for @thread_id.
+ * If we haven't allocated a pool entry for this thread before, do so now and
+ * insert it into fo_iobuf_hash, otherwise we can find it from fo_iobuf_hash */
+void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
+{
+ struct filter_iobuf *pool = NULL;
+ __u64 key = 0;
+ int thread_id;
+ int rc;
+
+ thread_id = (oti && oti->oti_thread) ? oti->oti_thread->t_id : -1;
+ if (thread_id >= 0) {
+ struct ptlrpc_service_part *svcpt;
+
+ svcpt = oti->oti_thread->t_svcpt;
+ LASSERT(svcpt != NULL);
+
+ key = (__u64)(svcpt->scp_cpt) << 32 | thread_id;
+ pool = cfs_hash_lookup(filter->fo_iobuf_hash, &key);
+ if (pool != NULL)
+ return pool;
+ }
+
+ pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE, PTLRPC_MAX_BRW_PAGES);
+ if (pool == NULL)
+ return NULL;
+
+ if (thread_id >= 0) {
+ pool->dr_hkey = key;
+ rc = cfs_hash_add_unique(filter->fo_iobuf_hash,
+ &key, &pool->dr_hlist);
+ /* ptlrpc service should guarantee thread ID is unique */
+ LASSERT(rc != -EALREADY);
+ }
+
+ return pool;
}
/* mount the file system (secretly). lustre_cfg parameters are:
* We actually do sync in disconnect time, but disconnect
* may not come being marked rq_no_resend = 1.
*/
- llog_sync(ctxt, NULL);
+ llog_sync(ctxt, NULL, OBD_LLOG_FL_EXIT);
/*
* Balance class_import_get() in llog_receptor_accept().
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = NULL;
}
+
+ if (filter->fo_lcm) {
+ llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
+ filter->fo_lcm = NULL;
+ }
+
cfs_mutex_unlock(&ctxt->loc_mutex);
llog_ctxt_put(ctxt);
}
- if (filter->fo_lcm) {
- cfs_mutex_lock(&ctxt->loc_mutex);
- llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
- filter->fo_lcm = NULL;
- cfs_mutex_unlock(&ctxt->loc_mutex);
- }
RETURN(filter_olg_fini(&obd->obd_olg));
}
ctxt = llog_group_get_ctxt(olg_min,
LLOG_MDS_OST_REPL_CTXT);
if (ctxt) {
- err = llog_sync(ctxt, olg_min->olg_exp);
+ err = llog_sync(ctxt, olg_min->olg_exp, 0);
llog_ctxt_put(ctxt);
if (err) {
CERROR("error flushing logs to MDS: "