b=22176 Add .sync_fs super block handler
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index 22bf7d9..f4c5dd0 100644
@@ -864,6 +864,19 @@ static unsigned long rpcs_in_flight(struct client_obd *cli)
         return cli->cl_r_in_flight + cli->cl_w_in_flight;
 }
 
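+/* Wake up a sync_fs waiter: once cl_loi_sync_fs_list has drained, fire
+ * the upcall registered by osc_sync_fs() and clear the started flag so
+ * the upcall only runs once. Called with cl_loi_list_lock held, like
+ * osc_wake_cache_waiters() below. */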
+int osc_wake_sync_fs(struct client_obd *cli)
+{
+        int rc = 0;
+        ENTRY;
+        if (cfs_list_empty(&cli->cl_loi_sync_fs_list) &&
+            cli->cl_sf_wait.started) {
+                cli->cl_sf_wait.sfw_upcall(cli->cl_sf_wait.sfw_oi, rc);
+                cli->cl_sf_wait.started = 0;
+                CDEBUG(D_CACHE, "sync_fs_loi list is empty\n");
+        }
+        RETURN(rc);
+}
+
 /* caller must hold loi_list_lock */
 void osc_wake_cache_waiters(struct client_obd *cli)
 {
@@ -943,7 +956,7 @@ static int osc_shrink_grant_interpret(const struct lu_env *env,
         LASSERT(body);
         osc_update_grant(cli, body);
 out:
-        OBD_FREE_PTR(oa);
+        OBDO_FREE(oa);
         return rc;
 }
 
@@ -1020,6 +1033,11 @@ static int osc_should_shrink_grant(struct client_obd *client)
 {
         cfs_time_t time = cfs_time_current();
         cfs_time_t next_shrink = client->cl_next_shrink_grant;
+
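+        /* grant shrinking is only possible when the server advertised
+         * OBD_CONNECT_GRANT_SHRINK at connect time */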
+        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
+             OBD_CONNECT_GRANT_SHRINK) == 0)
+                return 0;
+
         if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                     client->cl_avail_grant > GRANT_SHRINK_LIMIT)
@@ -1082,11 +1100,21 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
                 cli->cl_avail_grant = ocd->ocd_grant;
         else
                 cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+
+        if (cli->cl_avail_grant < 0) {
+                CWARN("%s: available grant < 0, the OSS is probably not running"
+                      " with the patch from bug20278 (%ld)\n",
+                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
+                /* workaround for 1.6 servers which do not have
+                 * the patch from bug20278 */
+                cli->cl_avail_grant = ocd->ocd_grant;
+        }
+
         client_obd_list_unlock(&cli->cl_loi_list_lock);
 
-        CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
+               cli->cl_import->imp_obd->obd_name,
                cli->cl_avail_grant, cli->cl_lost_grant);
-        LASSERT(cli->cl_avail_grant >= 0);
 
         if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
             cfs_list_empty(&cli->cl_grant_shrink_list))
@@ -1172,7 +1200,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
 {
         if (p1->flag != p2->flag) {
                 unsigned mask = ~(OBD_BRW_FROM_GRANT|
-                                  OBD_BRW_NOCACHE|OBD_BRW_SYNC);
+                                  OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC);
 
                 /* warn if we try to combine flags that we don't know to be
                  * safe to combine */
@@ -1422,6 +1450,10 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                 return 0;
         }
 
+        /* If this is an mmapped file, it can be changed at any time */
+        if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP)
+                return 1;
+
         if (oa->o_valid & OBD_MD_FLFLAGS)
                 cksum_type = cksum_type_unpack(oa->o_flags);
         else
@@ -1495,14 +1527,14 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
         }
 #endif
 
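+        /* update the grant from the reply before the error check below,
+         * so it is accounted even when the BRW itself failed */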
+        osc_update_grant(cli, body);
+
         if (rc < 0)
                 RETURN(rc);
 
         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
 
-        osc_update_grant(cli, body);
-
         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                 if (rc > 0) {
                         CERROR("Unexpected +ve rc %d\n", rc);
@@ -1681,7 +1713,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
         ENTRY;
 
         if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) {
-                CERROR("too many resent retries, returning error\n");
+                CERROR("too many resend retries, returning error\n");
                 RETURN(-EIO);
         }
 
@@ -1905,6 +1937,24 @@ static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
         osc_release_write_grant(cli, &oap->oap_brw_page, sent);
 }
 
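+/* A sync_fs request forces an RPC: return 1 if the first urgent page on
+ * this lop carries ASYNC_SYNCFS (see the XXX note in
+ * osc_set_async_flags_base() about pages queued behind other urgent
+ * items). */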
+static int lop_makes_syncfs_rpc(struct loi_oap_pages *lop)
+{
+        struct osc_async_page *oap;
+        ENTRY;
+
+        if (cfs_list_empty(&lop->lop_urgent))
+                RETURN(0);
+
+        oap = cfs_list_entry(lop->lop_urgent.next,
+                             struct osc_async_page, oap_urgent_item);
+
+        if (oap->oap_async_flags & ASYNC_SYNCFS) {
+                CDEBUG(D_CACHE, "syncfs request forcing RPC\n");
+                RETURN(1);
+        }
+
+        RETURN(0);
+}
 
 /* This maintains the lists of pending pages to read/write for a given object
  * (lop).  This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
@@ -1938,7 +1988,7 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
         if (cmd & OBD_BRW_WRITE) {
                 /* trigger a write rpc stream as long as there are dirtiers
                  * waiting for space.  as they're waiting, they're not going to
-                 * create more pages to coallesce with what's waiting.. */
+                 * create more pages to coalesce with what's waiting.. */
                 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
                         RETURN(1);
@@ -1993,10 +2043,19 @@ void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
         } else {
-                on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
-                on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
-                        lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
-                        lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+                if (lop_makes_syncfs_rpc(&loi->loi_write_lop)) {
+                        on_list(&loi->loi_sync_fs_item,
+                                &cli->cl_loi_sync_fs_list,
+                                loi->loi_write_lop.lop_num_pending);
+                } else {
+                        on_list(&loi->loi_hp_ready_item,
+                                &cli->cl_loi_hp_ready_list, 0);
+                        on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
+                                lop_makes_rpc(cli, &loi->loi_write_lop,
+                                              OBD_BRW_WRITE)||
+                                lop_makes_rpc(cli, &loi->loi_read_lop,
+                                              OBD_BRW_READ));
+                }
         }
 
         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
@@ -2162,9 +2221,20 @@ static int brw_interpret(const struct lu_env *env,
         rc = osc_brw_fini_request(req, rc);
         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
         if (osc_recoverable_error(rc)) {
-                rc = osc_brw_redo_request(req, aa);
-                if (rc == 0)
-                        RETURN(0);
+                /* Only retry once for mmapped files since the mmapped page
+                 * might be modified at any time. We have to retry at least
+                 * once in case there really WAS a corruption of the page
+                 * on the network, that was not caused by mmap() modifying
+                 * the page. Bug11742 */
+                if ((rc == -EAGAIN) && (aa->aa_resends > 0) &&
+                    aa->aa_oa->o_valid & OBD_MD_FLFLAGS &&
+                    aa->aa_oa->o_flags & OBD_FL_MMAP) {
+                        rc = 0;
+                } else {
+                        rc = osc_brw_redo_request(req, aa);
+                        if (rc == 0)
+                                RETURN(0);
+                }
         }
 
         if (aa->aa_ocapa) {
@@ -2196,19 +2266,18 @@ static int brw_interpret(const struct lu_env *env,
                 }
                 OBDO_FREE(aa->aa_oa);
         } else { /* from async_internal() */
-                int i;
+                obd_count i;
                 for (i = 0; i < aa->aa_page_count; i++)
                         osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
-
-                if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
-                        OBDO_FREE(aa->aa_oa);
         }
         osc_wake_cache_waiters(cli);
+        osc_wake_sync_fs(cli);
         osc_check_rpcs(env, cli);
         client_obd_list_unlock(&cli->cl_loi_list_lock);
         if (!async)
                 cl_req_completion(env, aa->aa_clerq, rc);
         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
+
         RETURN(rc);
 }
 
@@ -2230,11 +2299,14 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
         enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
         struct ldlm_lock *lock = NULL;
         struct cl_req_attr crattr;
-        int i, rc;
+        int i, rc, mpflag = 0;
 
         ENTRY;
         LASSERT(!cfs_list_empty(rpc_list));
 
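+        /* mark the current task as under memory pressure while the request
+         * is built; the previous state is restored on the out: path */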
+        if (cmd & OBD_BRW_MEMALLOC)
+                mpflag = cfs_memory_pressure_get_and_set();
+
         memset(&crattr, 0, sizeof crattr);
         OBD_ALLOC(pga, sizeof(*pga) * page_count);
         if (pga == NULL)
@@ -2290,6 +2362,9 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
                 GOTO(out, req = ERR_PTR(rc));
         }
 
+        if (cmd & OBD_BRW_MEMALLOC)
+                req->rq_memalloc = 1;
+
         /* Need to update the timestamps after the request is built in case
          * we race with setattr (locally or in queue at OST).  If OST gets
          * later setattr before earlier BRW (as determined by the request xid),
@@ -2306,6 +2381,9 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
         CFS_INIT_LIST_HEAD(rpc_list);
         aa->aa_clerq = clerq;
 out:
+        if (cmd & OBD_BRW_MEMALLOC)
+                cfs_memory_pressure_restore(mpflag);
+
         capa_put(crattr.cra_capa);
         if (IS_ERR(req)) {
                 if (oa)
@@ -2340,8 +2418,9 @@ out:
 * \param cmd OBD_BRW_* macros
  * \param lop pending pages
  *
- * \return zero if pages successfully add to send queue.
- * \return not zere if error occurring.
+ * \return zero if no page was added to the send queue.
+ * \return 1 if pages were successfully added to the send queue.
+ * \return negative on errors.
  */
 static int
 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
@@ -2357,7 +2436,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
         CFS_LIST_HEAD(tmp_list);
         unsigned int ending_offset;
         unsigned  starting_offset = 0;
-        int srvlock = 0;
+        int srvlock = 0, mem_tight = 0;
         struct cl_object *clob = NULL;
         ENTRY;
 
@@ -2365,7 +2444,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
          * to be canceled, the pages covered by the lock will be sent out
          * with ASYNC_HP. We have to send out them as soon as possible. */
         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
-                if (oap->oap_async_flags & ASYNC_HP) 
+                if (oap->oap_async_flags & ASYNC_HP)
                         cfs_list_move(&oap->oap_pending_item, &tmp_list);
                 else
                         cfs_list_move_tail(&oap->oap_pending_item, &tmp_list);
@@ -2409,7 +2488,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
                  * until completion unlocks it.  commit_write submits a page
                  * as not ready because its unlock will happen unconditionally
                  * as the call returns.  if we race with commit_write giving
-                 * us that page we dont' want to create a hole in the page
+                 * us that page we don't want to create a hole in the page
                  * stream, so we stop and leave the rpc to be fired by
                  * another dirtier or kupdated interval (the not ready page
                  * will still be on the dirty list).  we could call in
@@ -2501,6 +2580,8 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
 
                 /* now put the page back in our accounting */
                 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
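+                /* one MEMALLOC page marks the whole RPC memory-tight */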
+                if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
+                        mem_tight = 1;
                 if (page_count == 0)
                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
                 if (++page_count >= cli->cl_max_pages_per_rpc)
@@ -2523,7 +2604,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
         }
 
         osc_wake_cache_waiters(cli);
-
+        osc_wake_sync_fs(cli);
         loi_list_maint(cli, loi);
 
         client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -2536,7 +2617,8 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
                 RETURN(0);
         }
 
-        req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
+        req = osc_build_req(env, cli, &rpc_list, page_count,
+                            mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
         if (IS_ERR(req)) {
                 LASSERT(cfs_list_empty(&rpc_list));
                 loi_list_maint(cli, loi);
@@ -2615,6 +2697,9 @@ struct lov_oinfo *osc_next_loi(struct client_obd *cli)
         if (!cfs_list_empty(&cli->cl_loi_ready_list))
                 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
                                       struct lov_oinfo, loi_ready_item));
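+        /* next come objects queued for sync_fs, so that a pending
+         * osc_sync_fs() call can complete */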
+        if (!cfs_list_empty(&cli->cl_loi_sync_fs_list))
+                RETURN(cfs_list_entry(cli->cl_loi_sync_fs_list.next,
+                                      struct lov_oinfo, loi_sync_fs_item));
 
         /* then if we have cache waiters, return all objects with queued
          * writes.  This is especially important when many small files
@@ -2720,7 +2805,7 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
                                 race_counter++;
                 }
 
-                /* attempt some inter-object balancing by issueing rpcs
+                /* attempt some inter-object balancing by issuing rpcs
                  * for each object in turn */
                 if (!cfs_list_empty(&loi->loi_hp_ready_item))
                         cfs_list_del_init(&loi->loi_hp_ready_item);
@@ -2730,6 +2815,8 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
                         cfs_list_del_init(&loi->loi_write_item);
                 if (!cfs_list_empty(&loi->loi_read_item))
                         cfs_list_del_init(&loi->loi_read_item);
+                if (!cfs_list_empty(&loi->loi_sync_fs_item))
+                        cfs_list_del_init(&loi->loi_sync_fs_item);
 
                 loi_list_maint(cli, loi);
 
@@ -2946,7 +3033,7 @@ int osc_queue_async_io(const struct lu_env *env,
         oap->oap_count = count;
         oap->oap_brw_flags = brw_flags;
         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
-        if (libcfs_memory_pressure_get())
+        if (cfs_memory_pressure_get())
                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
         cfs_spin_lock(&oap->oap_lock);
         oap->oap_async_flags = async_flags;
@@ -2994,6 +3081,21 @@ int osc_set_async_flags_base(struct client_obd *cli,
         if ((oap->oap_async_flags & async_flags) == async_flags)
                 RETURN(0);
 
+        /* XXX: This introduces a tiny, insignificant race for the case
+         * where this loi already has other urgent items.
+         */
+        if (SETTING(oap->oap_async_flags, async_flags, ASYNC_SYNCFS) &&
+            cfs_list_empty(&oap->oap_rpc_item) &&
+            cfs_list_empty(&oap->oap_urgent_item)) {
+                cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+                flags |= ASYNC_SYNCFS;
+                cfs_spin_lock(&oap->oap_lock);
+                oap->oap_async_flags |= flags;
+                cfs_spin_unlock(&oap->oap_lock);
+                loi_list_maint(cli, loi);
+                RETURN(0);
+        }
+
         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
                 flags |= ASYNC_READY;
 
@@ -3050,7 +3152,8 @@ int osc_teardown_async_page(struct obd_export *exp,
         if (!cfs_list_empty(&oap->oap_urgent_item)) {
                 cfs_list_del_init(&oap->oap_urgent_item);
                 cfs_spin_lock(&oap->oap_lock);
-                oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
+                oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP |
+                                          ASYNC_SYNCFS);
                 cfs_spin_unlock(&oap->oap_lock);
         }
         if (!cfs_list_empty(&oap->oap_pending_item)) {
@@ -3183,6 +3286,9 @@ static int osc_enqueue_interpret(const struct lu_env *env,
          * osc_enqueue_fini(). */
         ldlm_lock_addref(&handle, mode);
 
+        /* Let the CP AST grant the lock first. */
+        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+
         /* Complete obtaining the lock procedure. */
         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
                                    mode, aa->oa_flags, aa->oa_lvb,
@@ -3341,8 +3447,10 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
                         RETURN(-ENOMEM);
 
                 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
-                if (rc)
+                if (rc) {
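+                        /* free the request on prep failure to avoid a leak */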
+                        ptlrpc_request_free(req);
                         RETURN(rc);
+                }
 
                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                                      sizeof *lvb);
@@ -4042,7 +4150,7 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
 
                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
                 aa = ptlrpc_req_async_args(req);
-                OBD_ALLOC_PTR(oa);
+                OBDO_ALLOC(oa);
                 if (!oa) {
                         ptlrpc_req_finished(req);
                         RETURN(-ENOMEM);
@@ -4316,6 +4424,32 @@ static int osc_import_event(struct obd_device *obd,
         RETURN(rc);
 }
 
+/**
+ * Determine whether the lock can be canceled before it is replayed
+ * during recovery; see bug16774 for detailed information.
+ *
+ * \retval zero the lock can't be canceled
+ * \retval other ok to cancel
+ */
+static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+        check_res_locked(lock->l_resource);
+
+        /*
+         * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
+         *
+         * XXX as a future improvement, we can also cancel unused write lock
+         * if it doesn't have dirty data and active mmaps.
+         */
+        if (lock->l_resource->lr_type == LDLM_EXTENT &&
+            (lock->l_granted_mode == LCK_PR ||
+             lock->l_granted_mode == LCK_CR) &&
+            (osc_dlm_lock_pageref(lock) == 0))
+                RETURN(1);
+
+        RETURN(0);
+}
+
 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
         int rc;
@@ -4354,6 +4488,8 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
                 cfs_sema_init(&cli->cl_grant_sem, 1);
+
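+                /* allow ldlm to cancel unused read locks instead of
+                 * replaying them during recovery */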
+                ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
         }
 
         RETURN(rc);
@@ -4439,6 +4575,45 @@ int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
         return(rc);
 }
 
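+/* Implementation of the new .o_sync_fs method: tag every pending write
+ * page with ASYNC_SYNCFS so loi_list_maint() moves the objects onto
+ * cl_loi_sync_fs_list, kick the RPC engine, and let osc_wake_sync_fs()
+ * invoke the caller's upcall once that list drains. */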
+static int osc_sync_fs(struct obd_device *obd, struct obd_info *oinfo,
+                       int wait)
+{
+        struct client_obd *cli;
+        struct lov_oinfo *loi;
+        struct lov_oinfo *tloi;
+        struct osc_async_page *oap;
+        struct osc_async_page *toap;
+        struct loi_oap_pages *lop;
+        struct lu_env *env;
+        int refcheck;
+        int rc = 0;
+        ENTRY;
+
+        env = cl_env_get(&refcheck);
+        if (IS_ERR(env))
+                RETURN(PTR_ERR(env));
+
+        cli = &obd->u.cli;
+        client_obd_list_lock(&cli->cl_loi_list_lock);
+        cli->cl_sf_wait.sfw_oi = oinfo;
+        cli->cl_sf_wait.sfw_upcall = oinfo->oi_cb_up;
+        cli->cl_sf_wait.started = 1;
+        /* build the cl_loi_sync_fs list */
+        cfs_list_for_each_entry_safe(loi, tloi, &cli->cl_loi_write_list,
+                                     loi_write_item) {
+                lop = &loi->loi_write_lop;
+                cfs_list_for_each_entry_safe(oap, toap, &lop->lop_pending,
+                                             oap_pending_item)
+                        osc_set_async_flags_base(cli, loi, oap, ASYNC_SYNCFS);
+        }
+
+        osc_check_rpcs(env, cli);
+        osc_wake_sync_fs(cli);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
+        cl_env_put(env, &refcheck);
+        RETURN(rc);
+}
+
 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
 {
         return osc_process_config_base(obd, buf);
@@ -4481,6 +4656,7 @@ struct obd_ops osc_obd_ops = {
         .o_llog_init            = osc_llog_init,
         .o_llog_finish          = osc_llog_finish,
         .o_process_config       = osc_process_config,
+        .o_sync_fs              = osc_sync_fs,
 };
 
 extern struct lu_kmem_descr osc_caches[];