LU-9679 osc: centralize handling of PTLRPCD_SET
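This change drops the per-call-site PTLRPCD_SET branch from osc_setattr_async(), osc_ladvise_base(), osc_sync_base() and osc_enqueue_base(), deletes the sentinel definition itself, and picks up a few independent cleanups (LIST_HEAD() initializers, l_wait_event_abortable_exclusive() in osc_destroy(), kmap_atomic() in place of the ll_kmap_atomic() wrapper, removal of the oap_interrupted and dirty-transit accounting, a const obd_ops table). Below is a stand-alone sketch of the call-site change only, with stub declarations and hypothetical add_req_old()/add_req_new() helpers; where the ptlrpcd dispatch lives after this patch is outside the hunks shown here.

/* Sketch only: opaque stubs, not the Lustre headers. */
struct ptlrpc_request;
struct ptlrpc_request_set;

void ptlrpcd_add_req(struct ptlrpc_request *req);
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req);

/* Sentinel that this patch deletes from osc_request.c. */
static struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;

/* Old pattern: every OSC helper repeated the sentinel check. */
static void add_req_old(struct ptlrpc_request_set *rqset,
                        struct ptlrpc_request *req)
{
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);
}

/* New pattern: the helper always adds to the caller-supplied set. */
static void add_req_new(struct ptlrpc_request_set *rqset,
                        struct ptlrpc_request *req)
{
        ptlrpc_set_add_req(rqset, req);
}
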
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index 42c628e..bd28d27 100644
--- a/lustre/osc/osc_request.c
+++ b/lustre/osc/osc_request.c
@@ -236,10 +236,7 @@ int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;
 
-               if (rqset == PTLRPCD_SET)
-                       ptlrpcd_add_req(req);
-               else
-                       ptlrpc_set_add_req(rqset, req);
+               ptlrpc_set_add_req(rqset, req);
        }
 
        RETURN(0);
@@ -324,10 +321,7 @@ int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
        la->la_upcall = upcall;
        la->la_cookie = cookie;
 
-       if (rqset == PTLRPCD_SET)
-               ptlrpcd_add_req(req);
-       else
-               ptlrpc_set_add_req(rqset, req);
+       ptlrpc_set_add_req(rqset, req);
 
        RETURN(0);
 }
@@ -499,10 +493,7 @@ int osc_sync_base(struct osc_object *obj, struct obdo *oa,
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;
 
-       if (rqset == PTLRPCD_SET)
-               ptlrpcd_add_req(req);
-       else
-               ptlrpc_set_add_req(rqset, req);
+       ptlrpc_set_add_req(rqset, req);
 
        RETURN (0);
 }
@@ -577,7 +568,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
         struct client_obd     *cli = &exp->exp_obd->u.cli;
         struct ptlrpc_request *req;
         struct ost_body       *body;
-       struct list_head       cancels = LIST_HEAD_INIT(cancels);
+       LIST_HEAD(cancels);
         int rc, count;
         ENTRY;
 
@@ -613,17 +604,16 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 
        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
-               struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * under max_rpc_in_flight
                 */
-               rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
-                                           osc_can_send_destroy(cli), &lwi);
+               rc = l_wait_event_abortable_exclusive(
+                       cli->cl_destroy_waitq,
+                       osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
-                       RETURN(rc);
+                       RETURN(-EINTR);
                }
        }
 
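The osc_destroy() hunk above retires the struct l_wait_info plumbing in favour of l_wait_event_abortable_exclusive(), with the caller reporting any non-zero result as -EINTR. A rough open-coded model of what an abortable, exclusive wait is assumed to amount to (kernel-style C, hypothetical helper name, not the Lustre macro; "abortable" is taken to mean woken only by fatal signals, as in the wait_event_killable() family):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/* Hypothetical model: exclusive waiter, interruptible by fatal signals only. */
static int abortable_exclusive_wait_sketch(wait_queue_head_t *wq,
                                           bool (*cond)(void *data), void *data)
{
        DEFINE_WAIT(wait);
        int rc = 0;

        while (!cond(data)) {
                /* Exclusive: one waiter is woken per wake_up() on the queue. */
                prepare_to_wait_exclusive(wq, &wait, TASK_KILLABLE);
                if (cond(data))
                        break;
                if (fatal_signal_pending(current)) {
                        rc = -EINTR;    /* osc_destroy() returns -EINTR on any rc */
                        break;
                }
                schedule();
        }
        finish_wait(wq, &wait);
        return rc;
}
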
@@ -645,21 +635,18 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
-       if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
-                    cli->cl_dirty_max_pages)) {
-               CERROR("dirty %lu - %lu > dirty_max %lu\n",
-                      cli->cl_dirty_pages, cli->cl_dirty_transit,
+       if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
+               CERROR("dirty %lu > dirty_max %lu\n",
+                      cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
-       } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
-                           atomic_long_read(&obd_dirty_transit_pages) >
+       } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() allowing the atomic_inc() are
                 * not covered by a lock thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
-               CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+               CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
-                      atomic_long_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
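The fudge-factor comment kept in the hunk above explains that the unlocked limit check and the atomic increment of obd_dirty_pages can race; here is a tiny stand-alone illustration (C11 atomics, hypothetical numbers, not Lustre code) of the benign one-page overshoot the "+1" tolerates:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        long max_dirty = 100;              /* hypothetical system limit        */
        atomic_long dirty = 99;            /* both writers see 99 < 100 ...    */

        /* Each writer passed its unlocked limit check before either bumped
         * the counter, so both increments go through ...                      */
        atomic_fetch_add(&dirty, 1);       /* writer A */
        atomic_fetch_add(&dirty, 1);       /* writer B */

        /* ... and the counter briefly reads limit + 1: benign, so the check
         * compares against obd_max_dirty_pages + 1 instead of flagging it.    */
        printf("dirty %ld, limit %ld\n", atomic_load(&dirty), max_dirty);
        return 0;
}
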
@@ -846,8 +833,10 @@ static int osc_should_shrink_grant(struct client_obd *client)
                return 0;
 
        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
-           client->cl_import->imp_grant_shrink_disabled)
+           client->cl_import->imp_grant_shrink_disabled) {
+               osc_update_next_shrink(client);
                return 0;
+       }
 
        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
@@ -999,11 +988,11 @@ void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
        }
        spin_unlock(&cli->cl_loi_list_lock);
 
-       CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
-               "chunk bits: %d cl_max_extent_pages: %d\n",
-               cli_name(cli),
-               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
-               cli->cl_max_extent_pages);
+       CDEBUG(D_CACHE,
+              "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
+              cli_name(cli),
+              cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+              cli->cl_max_extent_pages);
 
        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
@@ -1448,13 +1437,13 @@ no_bulk:
                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                         (pg->flag & OBD_BRW_SRVLOCK));
                if (short_io_size != 0 && opc == OST_WRITE) {
-                       unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
+                       unsigned char *ptr = kmap_atomic(pg->pg);
 
                        LASSERT(short_io_size >= requested_nob + pg->count);
                        memcpy(short_io_buf + requested_nob,
                               ptr + poff,
                               pg->count);
-                       ll_kunmap_atomic(ptr, KM_USER0);
+                       kunmap_atomic(ptr);
                } else if (short_io_size == 0) {
                        desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
                                                         pg->count);
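The short-I/O copy above now uses the single-argument kmap_atomic()/kunmap_atomic() pair directly instead of the ll_kmap_atomic() compat wrapper and its obsolete KM_USER0 slot argument. A minimal kernel-C sketch of the pattern (hypothetical copy_page_fragment() helper, not from this file):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_fragment(void *dst, struct page *page,
                               unsigned int offset, unsigned int count)
{
        unsigned char *ptr = kmap_atomic(page);   /* disables preemption */

        memcpy(dst, ptr + offset, count);
        kunmap_atomic(ptr);
}
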
@@ -1628,7 +1617,6 @@ static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
        if (rc)
                CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
        filp_close(filp, NULL);
-       return;
 }
 
 static int
@@ -1829,10 +1817,10 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
 
                        CDEBUG(D_CACHE, "page %p count %d\n",
                               aa->aa_ppga[i]->pg, count);
-                       ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+                       ptr = kmap_atomic(aa->aa_ppga[i]->pg);
                        memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
                               count);
-                       ll_kunmap_atomic((void *) ptr, KM_USER0);
+                       kunmap_atomic((void *) ptr);
 
                        buf += count;
                        nob -= count;
@@ -1943,16 +1931,12 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
                 RETURN(rc);
 
        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
-                if (oap->oap_request != NULL) {
-                        LASSERTF(request == oap->oap_request,
-                                 "request %p != oap_request %p\n",
-                                 request, oap->oap_request);
-                        if (oap->oap_interrupted) {
-                                ptlrpc_req_finished(new_req);
-                                RETURN(-EINTR);
-                        }
-                }
-        }
+               if (oap->oap_request != NULL) {
+                       LASSERTF(request == oap->oap_request,
+                                "request %p != oap_request %p\n",
+                                request, oap->oap_request);
+               }
+       }
        /*
         * New request takes over pga and oaps from old request.
         * Note that copying a list_head doesn't work, need to move it...
@@ -2199,13 +2183,12 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        int                             mem_tight = 0;
        int                             page_count = 0;
        bool                            soft_sync = false;
-       bool                            interrupted = false;
        bool                            ndelay = false;
        int                             i;
        int                             grant = 0;
        int                             rc;
        __u32                           layout_version = 0;
-       struct list_head                rpc_list = LIST_HEAD_INIT(rpc_list);
+       LIST_HEAD(rpc_list);
        struct ost_body                 *body;
        ENTRY;
        LASSERT(!list_empty(ext_list));
@@ -2216,7 +2199,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                mem_tight |= ext->oe_memalloc;
                grant += ext->oe_grants;
                page_count += ext->oe_nr_pages;
-               layout_version = MAX(layout_version, ext->oe_layout_version);
+               layout_version = max(layout_version, ext->oe_layout_version);
                if (obj == NULL)
                        obj = ext->oe_obj;
        }
@@ -2256,8 +2239,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                        else
                                LASSERT(oap->oap_page_off + oap->oap_count ==
                                        PAGE_SIZE);
-                       if (oap->oap_interrupted)
-                               interrupted = true;
                }
                if (ext->oe_ndelay)
                        ndelay = true;
@@ -2296,8 +2277,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        req->rq_interpret_reply = brw_interpret;
        req->rq_memalloc = mem_tight != 0;
        oap->oap_request = ptlrpc_request_addref(req);
-       if (interrupted && !req->rq_intr)
-               ptlrpc_mark_interrupted(req);
        if (ndelay) {
                req->rq_no_resend = req->rq_no_delay = 1;
                /* probably set a shorter timeout value.
@@ -2480,8 +2459,6 @@ int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
        RETURN(rc);
 }
 
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
 /* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
  * other synchronous requests, however keeping some locks and trying to obtain
@@ -2620,10 +2597,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
                        }
 
                        req->rq_interpret_reply = osc_enqueue_interpret;
-                       if (rqset == PTLRPCD_SET)
-                               ptlrpcd_add_req(req);
-                       else
-                               ptlrpc_set_add_req(rqset, req);
+                       ptlrpc_set_add_req(rqset, req);
                } else if (intent) {
                        ptlrpc_req_finished(req);
                }
@@ -3348,7 +3322,7 @@ int osc_cleanup_common(struct obd_device *obd)
 }
 EXPORT_SYMBOL(osc_cleanup_common);
 
-static struct obd_ops osc_obd_ops = {
+static const struct obd_ops osc_obd_ops = {
         .o_owner                = THIS_MODULE,
         .o_setup                = osc_setup,
         .o_precleanup           = osc_precleanup,
@@ -3371,7 +3345,7 @@ static struct obd_ops osc_obd_ops = {
 };
 
 static struct shrinker *osc_cache_shrinker;
-struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
+LIST_HEAD(osc_shrink_list);
 DEFINE_SPINLOCK(osc_shrink_lock);
 
 #ifndef HAVE_SHRINKER_COUNT
@@ -3381,10 +3355,6 @@ static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
                .nr_to_scan = shrink_param(sc, nr_to_scan),
                .gfp_mask   = shrink_param(sc, gfp_mask)
        };
-#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
-       struct shrinker *shrinker = NULL;
-#endif
-
        (void)osc_cache_shrink_scan(shrinker, &scv);
 
        return osc_cache_shrink_count(shrinker, &scv);
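For context on the compat wrapper above: when HAVE_SHRINKER_COUNT is defined the shrinker is driven through the split count/scan callbacks that osc_cache_shrink_count() and osc_cache_shrink_scan() already provide, and the wrapper only exists for the older single-callback API. A minimal sketch of the count/scan interface (kernel C; demo_* names are hypothetical and registration details vary across kernel versions):

#include <linux/shrinker.h>

/* Report how many objects could be freed right now (none in this sketch). */
static unsigned long demo_count(struct shrinker *sh, struct shrink_control *sc)
{
        return 0;
}

/* Try to free up to sc->nr_to_scan objects; return how many were freed. */
static unsigned long demo_scan(struct shrinker *sh, struct shrink_control *sc)
{
        return 0;       /* nothing cached, nothing freed */
}

static struct shrinker demo_shrinker = {
        .count_objects  = demo_count,
        .scan_objects   = demo_scan,
        .seeks          = DEFAULT_SEEKS,
};
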