sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
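+ /* PTLRPCD_SET is being removed; callers are assumed to always
+  * pass a real request set from now on. */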
+ ptlrpc_set_add_req(rqset, req);
}
RETURN(0);
la->la_upcall = upcall;
la->la_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
struct ost_body *body;
- struct list_head cancels = LIST_HEAD_INIT(cancels);
+ LIST_HEAD(cancels);
int rc, count;
ENTRY;
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
/*
* Wait until the number of on-going destroy RPCs drops
* below max_rpc_in_flight
*/
- rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
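+ /* The abortable wait returns -ERESTARTSYS if a fatal signal
+  * arrives (assumed wait_event_killable() semantics); this is
+  * reported to the caller as -EINTR below. */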
+ rc = l_wait_event_abortable_exclusive(
+ cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli));
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ RETURN(-EINTR);
}
}
oa->o_dirty = cli->cl_dirty_grant;
else
oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
- if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
- cli->cl_dirty_max_pages)) {
- CERROR("dirty %lu - %lu > dirty_max %lu\n",
- cli->cl_dirty_pages, cli->cl_dirty_transit,
+ if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
+ CERROR("dirty %lu > dirty_max %lu\n",
+ cli->cl_dirty_pages,
cli->cl_dirty_max_pages);
oa->o_undirty = 0;
- } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
- atomic_long_read(&obd_dirty_transit_pages) >
+ } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
(long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() and the atomic_inc() are not covered by a
 * lock, so they may safely race and trip this CERROR() unless
 * we add a small fudge factor (+1). */
- CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+ CERROR("%s: dirty %ld > system dirty_max %ld\n",
cli_name(cli), atomic_long_read(&obd_dirty_pages),
- atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
return 0;
if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
- client->cl_import->imp_grant_shrink_disabled)
+ client->cl_import->imp_grant_shrink_disabled) {
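+ /* Assumed intent: keep pushing the shrink deadline forward even
+  * while grant shrinking is disabled, so this path is not
+  * re-evaluated on every call. */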
+ osc_update_next_shrink(client);
return 0;
+ }
if (ktime_get_seconds() >= next_shrink - 5) {
/* Get the current RPC size directly, instead of going via:
}
spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d cl_max_extent_pages: %d\n",
- cli_name(cli),
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
- cli->cl_max_extent_pages);
+ CDEBUG(D_CACHE,
+ "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
+ cli_name(cli),
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+ cli->cl_max_extent_pages);
if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
osc_add_grant_list(cli);
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
if (short_io_size != 0 && opc == OST_WRITE) {
- unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
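+ /* kmap_atomic() takes no KM_* slot argument on current kernels,
+  * so the ll_kmap_atomic()/KM_USER0 compat wrappers are dropped. */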
+ unsigned char *ptr = kmap_atomic(pg->pg);
LASSERT(short_io_size >= requested_nob + pg->count);
memcpy(short_io_buf + requested_nob,
ptr + poff,
pg->count);
- ll_kunmap_atomic(ptr, KM_USER0);
+ kunmap_atomic(ptr);
} else if (short_io_size == 0) {
desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
pg->count);
if (rc)
CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
filp_close(filp, NULL);
- return;
}
static int
CDEBUG(D_CACHE, "page %p count %d\n",
aa->aa_ppga[i]->pg, count);
- ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+ ptr = kmap_atomic(aa->aa_ppga[i]->pg);
memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
count);
- ll_kunmap_atomic((void *) ptr, KM_USER0);
+ kunmap_atomic(ptr);
buf += count;
nob -= count;
RETURN(rc);
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request != NULL) {
- LASSERTF(request == oap->oap_request,
- "request %p != oap_request %p\n",
- request, oap->oap_request);
- if (oap->oap_interrupted) {
- ptlrpc_req_finished(new_req);
- RETURN(-EINTR);
- }
- }
- }
+ if (oap->oap_request != NULL) {
+ LASSERTF(request == oap->oap_request,
+ "request %p != oap_request %p\n",
+ request, oap->oap_request);
+ }
+ }
/*
* New request takes over pga and oaps from old request.
* Note that copying a list_head doesn't work; we need to move it...
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
- bool interrupted = false;
bool ndelay = false;
int i;
int grant = 0;
int rc;
__u32 layout_version = 0;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
struct ost_body *body;
ENTRY;
LASSERT(!list_empty(ext_list));
mem_tight |= ext->oe_memalloc;
grant += ext->oe_grants;
page_count += ext->oe_nr_pages;
- layout_version = MAX(layout_version, ext->oe_layout_version);
+ layout_version = max(layout_version, ext->oe_layout_version);
if (obj == NULL)
obj = ext->oe_obj;
}
else
LASSERT(oap->oap_page_off + oap->oap_count ==
PAGE_SIZE);
- if (oap->oap_interrupted)
- interrupted = true;
}
if (ext->oe_ndelay)
ndelay = true;
req->rq_interpret_reply = brw_interpret;
req->rq_memalloc = mem_tight != 0;
oap->oap_request = ptlrpc_request_addref(req);
- if (interrupted && !req->rq_intr)
- ptlrpc_mark_interrupted(req);
if (ndelay) {
req->rq_no_resend = req->rq_no_delay = 1;
/* probably set a shorter timeout value.
RETURN(rc);
}
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
/* When enqueuing asynchronously, locks are not ordered; we can obtain a lock
 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
 * other synchronous requests; however, keeping some locks and trying to obtain
}
req->rq_interpret_reply = osc_enqueue_interpret;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
}
}
EXPORT_SYMBOL(osc_cleanup_common);
-static struct obd_ops osc_obd_ops = {
+static const struct obd_ops osc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = osc_setup,
.o_precleanup = osc_precleanup,
};
static struct shrinker *osc_cache_shrinker;
-struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
+LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
#ifndef HAVE_SHRINKER_COUNT
.nr_to_scan = shrink_param(sc, nr_to_scan),
.gfp_mask = shrink_param(sc, gfp_mask)
};
-#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
- struct shrinker *shrinker = NULL;
-#endif
-
(void)osc_cache_shrink_scan(shrinker, &scv);
return osc_cache_shrink_count(shrinker, &scv);