+/*****************************************************************************
+ * Routines that use lustre_hash functionality to bind the current thread
+ * to its cl_env
+ */
+
+/** lustre hash to manage the cl_env for current thread:
+ * maps a thread (keyed by cfs_current()) to the cl_env attached to it
+ * through ce_owner/ce_node. */
+static lustre_hash_t *cl_env_hash;
+/* Forward declaration; the function's address also doubles as a magic
+ * cookie stored in ce_magic to sanity-check that a hash node really is
+ * an initialized cl_env. */
+static void cl_env_init0(struct cl_env *cle, void *debug);
+
+/**
+ * Hash callback: hash the key (a thread pointer, see cl_env_fetch())
+ * into a bucket index.  The cast width is chosen to match the platform
+ * word size so no pointer bits are discarded before hashing.
+ */
+static unsigned cl_env_hops_hash(lustre_hash_t *lh, void *key, unsigned mask)
+{
+#if BITS_PER_LONG == 64
+ return lh_u64_hash((__u64)key, mask);
+#else
+ return lh_u32_hash((__u32)key, mask);
+#endif
+}
+
+/*
+ * Map a hash node back to its embedding cl_env, verifying via the
+ * ce_magic cookie that the node was set up by cl_env_init0().
+ */
+static void *cl_env_hops_obj(struct hlist_node *hn)
+{
+        struct cl_env *env;
+
+        env = hlist_entry(hn, struct cl_env, ce_node);
+        LASSERT(env->ce_magic == &cl_env_init0);
+        return env;
+}
+
+/*
+ * Key comparator: a cl_env matches when @key is the thread recorded as
+ * its owner.  Only attached environments (non-NULL owner) may be probed.
+ */
+static int cl_env_hops_compare(void *key, struct hlist_node *hn)
+{
+        struct cl_env *env = cl_env_hops_obj(hn);
+
+        LASSERT(env->ce_owner != NULL);
+        return key == env->ce_owner;
+}
+
+/**
+ * Hash-table operations for cl_env_hash.  The table stores no separate
+ * key object: key extraction, object extraction, and the get/put hooks
+ * all resolve through cl_env_hops_obj(), which does no reference
+ * counting — lookups do not pin the cl_env.
+ */
+static lustre_hash_ops_t cl_env_hops = {
+ .lh_hash = cl_env_hops_hash,
+ .lh_compare = cl_env_hops_compare,
+ .lh_key = cl_env_hops_obj,
+ .lh_get = cl_env_hops_obj,
+ .lh_put = cl_env_hops_obj,
+};
+
+/*
+ * Return the cl_env attached to the current thread, or NULL when the
+ * thread has no binding in cl_env_hash.
+ */
+static inline struct cl_env *cl_env_fetch(void)
+{
+        struct cl_env *env = lustre_hash_lookup(cl_env_hash, cfs_current());
+
+        LASSERT(ergo(env, env->ce_magic == &cl_env_init0));
+        return env;
+}
+
+/*
+ * Bind @cle to the current thread by recording the thread in ce_owner
+ * and inserting the env into cl_env_hash.  @cle must not already be
+ * owned; a NULL @cle is silently ignored.
+ */
+static inline void cl_env_attach(struct cl_env *cle)
+{
+        int rc;
+
+        if (cle == NULL)
+                return;
+
+        LASSERT(cle->ce_owner == NULL);
+        cle->ce_owner = cfs_current();
+        rc = lustre_hash_add_unique(cl_env_hash, cle->ce_owner,
+                                    &cle->ce_node);
+        LASSERT(rc == 0);
+}
+
+/*
+ * Unbind a cl_env from its owning thread.  With @cle == NULL the
+ * current thread's binding is looked up first.  Only the owning thread
+ * may detach.  Returns the (possibly looked-up) cl_env, or NULL when
+ * the current thread had nothing attached.
+ */
+static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+{
+        if (cle == NULL)
+                cle = cl_env_fetch();
+
+        if (cle != NULL && cle->ce_owner != NULL) {
+                void *cookie;
+
+                LASSERT(cle->ce_owner == cfs_current());
+                cookie = lustre_hash_del(cl_env_hash, cle->ce_owner,
+                                         &cle->ce_node);
+                cle->ce_owner = NULL;
+                LASSERT(cookie == cle);
+        }
+        return cle;
+}
+/* ----------------------- hash routines end ---------------------------- */
+