Whamcloud - gitweb
b=21686 fail the request if its obd_device is stopping
[fs/lustre-release.git] / lustre / osc / cache.c
index 371b78e..ae1a547 100644 (file)
@@ -86,6 +86,7 @@ int cache_add_extent(struct lustre_cache *cache, struct ldlm_res_id *res,
         struct lustre_handle tmplockh;
         ldlm_policy_data_t tmpex;
         struct ldlm_lock *lock = NULL;
+        int mode = 0;
         ENTRY;
 
         /* Don't add anything second time */
@@ -99,16 +100,18 @@ int cache_add_extent(struct lustre_cache *cache, struct ldlm_res_id *res,
                 if (!lock)
                         RETURN(-ENOLCK);
 
-                LASSERTF(lock->l_policy_data.l_extent.start <=
-                         extent->oap_obj_off &&
-                         extent->oap_obj_off + CFS_PAGE_SIZE - 1 <=
-                         lock->l_policy_data.l_extent.end,
-                         "Got wrong lock [" LPU64 "," LPU64 "] for page with "
-                         "offset " LPU64 "\n",
-                         lock->l_policy_data.l_extent.start,
-                         lock->l_policy_data.l_extent.end, extent->oap_obj_off);
+                if (lock->l_policy_data.l_extent.start > extent->oap_obj_off ||
+                    extent->oap_obj_off + CFS_PAGE_SIZE - 1 >
+                    lock->l_policy_data.l_extent.end) {
+                        CDEBUG(D_CACHE, "Got wrong lock [" LPU64 "," LPU64 "] "
+                               "for page with offset " LPU64 "\n",
+                               lock->l_policy_data.l_extent.start,
+                               lock->l_policy_data.l_extent.end,
+                               extent->oap_obj_off);
+                        LDLM_LOCK_PUT(lock);
+                        RETURN(-ENOLCK);
+                }
         } else {
-                int mode;
                 /* Real extent width calculation here once we have real
                  * extents
                  */
@@ -138,13 +141,26 @@ int cache_add_extent(struct lustre_cache *cache, struct ldlm_res_id *res,
                                "under us\n");
                         RETURN(-ENOLCK);
                 }
-                ldlm_lock_decref(&tmplockh, mode);
+
+                /* XXX Note! if the caller passed an unused lock handle,
+                 * it expects us to return the lockh of the lock we matched,
+                 * reference(LCK_PR) of the lock is increased here to assure
+                 * its validity, and the caller should decrease the reference
+                 * when it isn't used any more. */
+                if (lockh && !lustre_handle_is_used(lockh)) {
+                        ldlm_lock_addref(&tmplockh, LCK_PR);
+                        lustre_handle_copy(lockh, &tmplockh);
+                }
         }
 
         spin_lock(&lock->l_extents_list_lock);
         list_add_tail(&extent->oap_page_list, &lock->l_extents_list);
         spin_unlock(&lock->l_extents_list_lock);
         extent->oap_ldlm_lock = lock;
+        LASSERTF(!(lock->l_flags & LDLM_FL_CANCEL), "Adding a page to already "
+                 "cancelled lock %p\n", lock);
+        if (mode)
+                ldlm_lock_decref(&tmplockh, mode);
         LDLM_LOCK_PUT(lock);
 
         RETURN(0);
@@ -227,6 +243,7 @@ int cache_del_extent_removal_cb(struct lustre_cache *cache,
 {
         int found = 0;
         struct page_removal_cb_element *element, *t;
+        ENTRY;
 
         write_lock(&cache->lc_page_removal_cb_lock);
         list_for_each_entry_safe(element, t,
@@ -347,6 +364,7 @@ static int cache_remove_extents_from_lock(struct lustre_cache *cache,
                            page with address 0x5a5a5a5a in
                            cache_extent_removal_event */
                         ext_data = extent->oap_page;
+                        LASSERT(cache->lc_pin_extent_cb != NULL);
                         cache->lc_pin_extent_cb(extent->oap_page);
 
                         if (lock->l_flags & LDLM_FL_BL_AST)
@@ -402,46 +420,48 @@ struct lustre_cache *cache_create(struct obd_device *obd)
         OBD_ALLOC(cache, sizeof(*cache));
         if (!cache)
                 GOTO(out, NULL);
+
         spin_lock_init(&cache->lc_locks_list_lock);
         CFS_INIT_LIST_HEAD(&cache->lc_locks_list);
         CFS_INIT_LIST_HEAD(&cache->lc_page_removal_callback_list);
         rwlock_init(&cache->lc_page_removal_cb_lock);
         cache->lc_obd = obd;
 
-      out:
+out:
         return cache;
 }
 
 /* Destroy @cache and free its memory */
 int cache_destroy(struct lustre_cache *cache)
 {
-        if (cache) {
-                spin_lock(&cache->lc_locks_list_lock);
-                if (!list_empty(&cache->lc_locks_list)) {
-                        struct ldlm_lock *lock, *tmp;
-                        CERROR("still have locks in the list on cleanup:\n");
-
-                        list_for_each_entry_safe(lock, tmp,
-                                                 &cache->lc_locks_list,
-                                                 l_cache_locks_list) {
-                                list_del_init(&lock->l_cache_locks_list);
-                                /* XXX: Of course natural idea would be to print
-                                   offending locks here, but if we use
-                                   e.g. LDLM_ERROR, we will likely crash here,
-                                   as LDLM error tries to access e.g.
-                                   nonexisting namespace. Normally this kind of
-                                   case could only happen when somebody did not
-                                   release lock reference and we have other ways
-                                   to detect this. */
-                                /* Make sure there are no pages left under the
-                                   lock */
-                                LASSERT(list_empty(&lock->l_extents_list));
-                        }
+        if (!cache)
+                RETURN(0);
+
+        spin_lock(&cache->lc_locks_list_lock);
+        if (!list_empty(&cache->lc_locks_list)) {
+                struct ldlm_lock *lock, *tmp;
+                CERROR("still have locks in the list on cleanup:\n");
+
+                list_for_each_entry_safe(lock, tmp,
+                                         &cache->lc_locks_list,
+                                         l_cache_locks_list) {
+                        list_del_init(&lock->l_cache_locks_list);
+                        /* XXX: Of course natural idea would be to print
+                         * offending locks here, but if we use
+                         * e.g. LDLM_ERROR, we will likely crash here,
+                         * as LDLM error tries to access e.g.
+                         * nonexisting namespace. Normally this kind of
+                         * case could only happen when somebody did not
+                         * release lock reference and we have other ways
+                         * to detect this. */
+                        /* Make sure there are no pages left under the
+                         * lock */
+                        LASSERT(list_empty(&lock->l_extents_list));
                 }
-                spin_unlock(&cache->lc_locks_list_lock);
-                LASSERT(list_empty(&cache->lc_page_removal_callback_list));
-                OBD_FREE(cache, sizeof(*cache));
         }
+        spin_unlock(&cache->lc_locks_list_lock);
+        LASSERT(list_empty(&cache->lc_page_removal_callback_list));
 
+        OBD_FREE(cache, sizeof(*cache));
         return 0;
 }