} else {
req->rq_interpret_reply = osc_setattr_interpret;
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
+ sa = ptlrpc_req_async_args(sa, req);
sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
}
RETURN(0);
}
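
Everywhere this patch touches async args, the open-coded size check plus cast
(CLASSERT + ptlrpc_req_async_args(req)) collapses into a two-argument form. A
minimal sketch of what the new helper is assumed to look like, with the
CLASSERT folded in as a BUILD_BUG_ON and the cast inferred from the
destination pointer:

    /* sketch of the assumed two-argument helper */
    #define ptlrpc_req_async_args(_var, req)                                \
    ({                                                                      \
            BUILD_BUG_ON(sizeof(*(_var)) > sizeof((req)->rq_async_args));   \
            (typeof(_var))&(req)->rq_async_args;                            \
    })
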
req->rq_interpret_reply = osc_ladvise_interpret;
- CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));
- la = ptlrpc_req_async_args(req);
+ la = ptlrpc_req_async_args(la, req);
la->la_oa = oa;
la->la_upcall = upcall;
la->la_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_setattr_interpret;
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
+ sa = ptlrpc_req_async_args(sa, req);
sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_sync_interpret;
- CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
- fa = ptlrpc_req_async_args(req);
+ fa = ptlrpc_req_async_args(fa, req);
fa->fa_obj = obj;
fa->fa_oa = oa;
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
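
PTLRPCD_SET was a sentinel pointer ((void *)1; its definition is deleted
later in this patch) that told these helpers to hand the request to a ptlrpcd
daemon thread rather than to a caller-owned set. With the sentinel gone the
helpers always add to the set they are given, so the ptlrpcd decision moves
to the caller. A hedged sketch of the caller-side pattern, using
osc_setattr_async() as the example:

    /* caller-managed set, the case these helpers still serve: */
    struct ptlrpc_request_set *set = ptlrpc_prep_set();

    rc = osc_setattr_async(exp, oa, upcall, cookie, set);

    /* fire-and-forget callers now queue a request they prepared
     * themselves, instead of passing the old PTLRPCD_SET sentinel: */
    ptlrpcd_add_req(req);
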
struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
struct ost_body *body;
- struct list_head cancels = LIST_HEAD_INIT(cancels);
+ LIST_HEAD(cancels);
int rc, count;
ENTRY;
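
LIST_HEAD() is the standard kernel shorthand that this hunk (and the
osc_shrink_list hunk at the end of the patch) switches to; as assumed here,
the two spellings are equivalent:

    /* old, open-coded form */
    struct list_head cancels = LIST_HEAD_INIT(cancels);

    /* kernel idiom: declare and point next/prev at itself in one step */
    LIST_HEAD(cancels);
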
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
/*
	 * Wait until the number of ongoing destroy RPCs drops
	 * below max_rpc_in_flight.
*/
- rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
+ rc = l_wait_event_abortable_exclusive(
+ cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli));
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ RETURN(-EINTR);
}
}
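
l_wait_event_abortable_exclusive() replaces the LWI_INTR/l_wait_event()
pair. Its assumed shape, following lustre_lib.h conventions, blocks
everything except fatal signals around an exclusive interruptible wait,
which is why it can return -ERESTARTSYS and why the caller now remaps any
failure to -EINTR:

    /* assumed shape of the wrapper, for illustration only */
    #define l_wait_event_abortable_exclusive(wq, condition)             \
    ({                                                                  \
            sigset_t __new, __old;                                      \
            int __rc;                                                   \
                                                                        \
            siginitsetinv(&__new, LUSTRE_FATAL_SIGS);                   \
            sigprocmask(SIG_BLOCK, &__new, &__old);                     \
            __rc = wait_event_interruptible_exclusive(wq, condition);   \
            sigprocmask(SIG_SETMASK, &__old, NULL);                     \
            __rc;                                                       \
    })
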
oa->o_dirty = cli->cl_dirty_grant;
else
oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
- if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
- cli->cl_dirty_max_pages)) {
- CERROR("dirty %lu - %lu > dirty_max %lu\n",
- cli->cl_dirty_pages, cli->cl_dirty_transit,
+ if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
+ CERROR("dirty %lu > dirty_max %lu\n",
+ cli->cl_dirty_pages,
cli->cl_dirty_max_pages);
oa->o_undirty = 0;
- } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
- atomic_long_read(&obd_dirty_transit_pages) >
+ } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
(long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() that allows the atomic_inc() is not
		 * covered by a lock, so the two may race and trip this
		 * CERROR() unless we add in a small fudge factor (+1). */
- CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+ CERROR("%s: dirty %ld > system dirty_max %ld\n",
cli_name(cli), atomic_long_read(&obd_dirty_pages),
- atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
osc_update_grant(cli, body);
out:
OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
+ aa->aa_oa = NULL;
return rc;
}
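
Clearing aa->aa_oa after the free (here and again in brw_interpret() further
down) is presumably a double-free guard: interpret callbacks can run again on
resend paths, and a NULLed field turns a repeat free or stale use into
something harmless and obvious. Illustratively:

    /* hypothetical second pass over the same args: with the clear in
     * place, the guard below simply skips instead of freeing twice */
    if (aa->aa_oa != NULL) {
            OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
            aa->aa_oa = NULL;
    }
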
if (client->cl_import == NULL)
return 0;
- if ((client->cl_import->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_GRANT_SHRINK) == 0)
- return 0;
+ if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
+ client->cl_import->imp_grant_shrink_disabled) {
+ osc_update_next_shrink(client);
+ return 0;
+ }
if (ktime_get_seconds() >= next_shrink - 5) {
/* Get the current RPC size directly, instead of going via:
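
OCD_HAS_FLAG() tidies the open-coded mask test; its assumed definition simply
pastes the short flag name onto the OBD_CONNECT_ prefix:

    #define OCD_HAS_FLAG(ocd, flg)                                  \
            (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))

so OCD_HAS_FLAG(&imp->imp_connect_data, GRANT_SHRINK) reads as "was
OBD_CONNECT_GRANT_SHRINK negotiated". The added imp_grant_shrink_disabled
test, together with the osc_update_next_shrink() call, keeps the next-shrink
timestamp advancing even while shrinking is administratively off.
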
if (client_gtd.gtd_stopped == 1)
return;
- if (next_shrink > ktime_get_seconds())
- schedule_delayed_work(&work, msecs_to_jiffies(
- (next_shrink - ktime_get_seconds()) *
- MSEC_PER_SEC));
- else
+ if (next_shrink > ktime_get_seconds()) {
+ time64_t delay = next_shrink - ktime_get_seconds();
+
+ schedule_delayed_work(&work, cfs_time_seconds(delay));
+ } else {
schedule_work(&work.work);
+ }
}
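
The reschedule now converts seconds to jiffies in one step with
cfs_time_seconds() instead of inflating to milliseconds for
msecs_to_jiffies(); the helper is assumed to be roughly:

    /* assumed shape; the exact definition varies across libcfs versions */
    static inline long cfs_time_seconds(time64_t seconds)
    {
            return nsecs_to_jiffies64(seconds * NSEC_PER_SEC);
    }

which avoids the intermediate MSEC_PER_SEC multiply on a time64_t.
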
void osc_schedule_grant_work(void)
}
spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d cl_max_extent_pages: %d\n",
- cli_name(cli),
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
- cli->cl_max_extent_pages);
+ CDEBUG(D_CACHE,
+ "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
+ cli_name(cli),
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+ cli->cl_max_extent_pages);
if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
osc_add_grant_list(cli);
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
if (short_io_size != 0 && opc == OST_WRITE) {
- unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
+ unsigned char *ptr = kmap_atomic(pg->pg);
LASSERT(short_io_size >= requested_nob + pg->count);
memcpy(short_io_buf + requested_nob,
ptr + poff,
pg->count);
- ll_kunmap_atomic(ptr, KM_USER0);
+ kunmap_atomic(ptr);
} else if (short_io_size == 0) {
desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
pg->count);
}
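
kmap_atomic() lost its KM_* slot argument long ago (slots are managed
implicitly per-CPU), so the ll_kmap_atomic() compat wrappers can be dropped
in favor of the bare kernel calls. The wrappers being removed were presumably
no more than:

    /* assumed compat shims, now dead weight: */
    #define ll_kmap_atomic(page, slot)      kmap_atomic(page)
    #define ll_kunmap_atomic(addr, slot)    kunmap_atomic(addr)
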
ptlrpc_request_set_replen(req);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
+ aa = ptlrpc_req_async_args(aa, req);
aa->aa_oa = oa;
aa->aa_requested_nob = requested_nob;
aa->aa_nio_count = niocount;
if (rc)
CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
filp_close(filp, NULL);
- return;
}
static int
&req->rq_import->imp_connection->c_peer;
struct ost_body *body;
u32 client_cksum = 0;
- ENTRY;
- if (rc < 0 && rc != -EDQUOT) {
- DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
- RETURN(rc);
- }
+ ENTRY;
- LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
- RETURN(-EPROTO);
- }
+ if (rc < 0 && rc != -EDQUOT) {
+ DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
+ RETURN(rc);
+ }
+
+ LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL) {
+ DEBUG_REQ(D_INFO, req, "cannot unpack body");
+ RETURN(-EPROTO);
+ }
/* set/clear over quota flag for a uid/gid/projid */
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
unsigned qid[LL_MAXQUOTAS] = {
body->oa.o_uid, body->oa.o_gid,
body->oa.o_projid };
- CDEBUG(D_QUOTA, "setdq for [%u %u %u] with valid %#llx, flags %x\n",
+ CDEBUG(D_QUOTA,
+ "setdq for [%u %u %u] with valid %#llx, flags %x\n",
body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
body->oa.o_valid, body->oa.o_flags);
osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
body->oa.o_flags);
- }
+ }
- osc_update_grant(cli, body);
+ osc_update_grant(cli, body);
- if (rc < 0)
- RETURN(rc);
+ if (rc < 0)
+ RETURN(rc);
- if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
- client_cksum = aa->aa_oa->o_cksum; /* save for later */
+ if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
+ client_cksum = aa->aa_oa->o_cksum; /* save for later */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
- if (rc > 0) {
- CERROR("Unexpected +ve rc %d\n", rc);
- RETURN(-EPROTO);
- }
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
+ if (rc > 0) {
+ CERROR("%s: unexpected positive size %d\n",
+ obd_name, rc);
+ RETURN(-EPROTO);
+ }
if (req->rq_bulk != NULL &&
sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
- RETURN(-EAGAIN);
+ RETURN(-EAGAIN);
- if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
- check_write_checksum(&body->oa, peer, client_cksum,
+ if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
+ check_write_checksum(&body->oa, peer, client_cksum,
body->oa.o_cksum, aa))
- RETURN(-EAGAIN);
+ RETURN(-EAGAIN);
- rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
- aa->aa_page_count, aa->aa_ppga);
- GOTO(out, rc);
- }
+ rc = check_write_rcs(req, aa->aa_requested_nob,
+ aa->aa_nio_count, aa->aa_page_count,
+ aa->aa_ppga);
+ GOTO(out, rc);
+ }
- /* The rest of this function executes only for OST_READs */
+ /* The rest of this function executes only for OST_READs */
if (req->rq_bulk == NULL) {
rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
/* if unwrap_bulk failed, return -EAGAIN to retry */
rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
}
- if (rc < 0)
- GOTO(out, rc = -EAGAIN);
+ if (rc < 0)
+ GOTO(out, rc = -EAGAIN);
- if (rc > aa->aa_requested_nob) {
- CERROR("Unexpected rc %d (%d requested)\n", rc,
- aa->aa_requested_nob);
- RETURN(-EPROTO);
- }
+ if (rc > aa->aa_requested_nob) {
+ CERROR("%s: unexpected size %d, requested %d\n", obd_name,
+ rc, aa->aa_requested_nob);
+ RETURN(-EPROTO);
+ }
if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
- CERROR ("Unexpected rc %d (%d transferred)\n",
- rc, req->rq_bulk->bd_nob_transferred);
- return (-EPROTO);
- }
+ CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
+ rc, req->rq_bulk->bd_nob_transferred);
+ RETURN(-EPROTO);
+ }
if (req->rq_bulk == NULL) {
/* short io */
CDEBUG(D_CACHE, "page %p count %d\n",
aa->aa_ppga[i]->pg, count);
- ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+ ptr = kmap_atomic(aa->aa_ppga[i]->pg);
memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
count);
- ll_kunmap_atomic((void *) ptr, KM_USER0);
+ kunmap_atomic((void *) ptr);
buf += count;
nob -= count;
CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
rc = 0;
}
- } else if (unlikely(client_cksum)) {
- static int cksum_missed;
-
- cksum_missed++;
- if ((cksum_missed & (-cksum_missed)) == cksum_missed)
- CERROR("Checksum %u requested from %s but not sent\n",
- cksum_missed, libcfs_nid2str(peer->nid));
- } else {
- rc = 0;
- }
+ } else if (unlikely(client_cksum)) {
+ static int cksum_missed;
+
+ cksum_missed++;
+ if ((cksum_missed & (-cksum_missed)) == cksum_missed)
+ CERROR("%s: checksum %u requested from %s but not sent\n",
+ obd_name, cksum_missed,
+ libcfs_nid2str(peer->nid));
+ } else {
+ rc = 0;
+ }
out:
if (rc >= 0)
lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
aa->aa_oa, &body->oa);
- RETURN(rc);
+ RETURN(rc);
}
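
Note the hunk above also converts a bare return (-EPROTO) to
RETURN(-EPROTO). ENTRY/RETURN are Lustre's function-tracing macros; a sketch
of their assumed shape shows why mixing in a plain return silently loses the
exit trace:

    /* sketch only; the real macros carry more context */
    #define ENTRY       CDEBUG(D_TRACE, "Process entered\n")
    #define RETURN(rc)                                              \
    do {                                                            \
            typeof(rc) __rc = (rc);                                 \
                                                                    \
            CDEBUG(D_TRACE, "Process leaving (rc=%ld)\n",           \
                   (long)__rc);                                     \
            return __rc;                                            \
    } while (0)
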
static int osc_brw_redo_request(struct ptlrpc_request *request,
struct osc_brw_async_args *aa, int rc)
{
- struct ptlrpc_request *new_req;
- struct osc_brw_async_args *new_aa;
- struct osc_async_page *oap;
- ENTRY;
+ struct ptlrpc_request *new_req;
+ struct osc_brw_async_args *new_aa;
+ struct osc_async_page *oap;
+ ENTRY;
+	/* The message below is checked in replay-ost-single.sh test_8ae */
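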
DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
"redo for recoverable error %d", rc);
RETURN(rc);
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request != NULL) {
- LASSERTF(request == oap->oap_request,
- "request %p != oap_request %p\n",
- request, oap->oap_request);
- if (oap->oap_interrupted) {
- ptlrpc_req_finished(new_req);
- RETURN(-EINTR);
- }
- }
- }
+ if (oap->oap_request != NULL) {
+ LASSERTF(request == oap->oap_request,
+ "request %p != oap_request %p\n",
+ request, oap->oap_request);
+ }
+ }
/*
* New request takes over pga and oaps from old request.
* Note that copying a list_head doesn't work, need to move it...
new_req->rq_generation_set = 1;
new_req->rq_import_generation = request->rq_import_generation;
- new_aa = ptlrpc_req_async_args(new_req);
+ new_aa = ptlrpc_req_async_args(new_aa, new_req);
INIT_LIST_HEAD(&new_aa->aa_oaps);
list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
cl_object_attr_unlock(obj);
}
OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
+ aa->aa_oa = NULL;
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
osc_inc_unstable_pages(req);
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
- bool interrupted = false;
bool ndelay = false;
int i;
int grant = 0;
int rc;
__u32 layout_version = 0;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
struct ost_body *body;
ENTRY;
LASSERT(!list_empty(ext_list));
mem_tight |= ext->oe_memalloc;
grant += ext->oe_grants;
page_count += ext->oe_nr_pages;
- layout_version = MAX(layout_version, ext->oe_layout_version);
+ layout_version = max(layout_version, ext->oe_layout_version);
if (obj == NULL)
obj = ext->oe_obj;
}
else
LASSERT(oap->oap_page_off + oap->oap_count ==
PAGE_SIZE);
- if (oap->oap_interrupted)
- interrupted = true;
}
if (ext->oe_ndelay)
ndelay = true;
req->rq_interpret_reply = brw_interpret;
req->rq_memalloc = mem_tight != 0;
oap->oap_request = ptlrpc_request_addref(req);
- if (interrupted && !req->rq_intr)
- ptlrpc_mark_interrupted(req);
if (ndelay) {
req->rq_no_resend = req->rq_no_delay = 1;
/* probably set a shorter timeout value.
cl_req_attr_set(env, osc2cl(obj), crattr);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
+ aa = ptlrpc_req_async_args(aa, req);
INIT_LIST_HEAD(&aa->aa_oaps);
list_splice_init(&rpc_list, &aa->aa_oaps);
INIT_LIST_HEAD(&aa->aa_exts);
}
spin_unlock(&cli->cl_loi_list_lock);
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
+ DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
RETURN(rc);
}
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
/* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
* from the 2nd OSC before a lock from the 1st one. This does not deadlock with
* other synchronous requests, however keeping some locks and trying to obtain
* release locks just after they are obtained. */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, union ldlm_policy_data *policy,
- struct ost_lvb *lvb, int kms_valid,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct ldlm_enqueue_info *einfo,
+ struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
+ void *cookie, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset, int async,
bool speculative)
{
policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
policy->l_extent.end |= ~PAGE_MASK;
- /*
- * kms is not valid when either object is completely fresh (so that no
- * locks are cached), or object was evicted. In the latter case cached
- * lock cannot be used, because it would prime inode state with
- * potentially stale LVB.
- */
- if (!kms_valid)
- goto no_match;
-
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
* VFS and page cache already protect us locally, so lots of readers/
}
}
-no_match:
if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
RETURN(-ENOLCK);
if (async) {
if (!rc) {
struct osc_enqueue_args *aa;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
+ aa = ptlrpc_req_async_args(aa, req);
aa->oa_exp = exp;
aa->oa_mode = einfo->ei_mode;
aa->oa_type = einfo->ei_type;
}
req->rq_interpret_reply = osc_enqueue_interpret;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
}
RETURN(rc);
}
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- enum ldlm_type type, union ldlm_policy_data *policy,
- enum ldlm_mode mode, __u64 *flags, void *data,
+int osc_match_base(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, enum ldlm_type type,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
+ __u64 *flags, struct osc_object *obj,
struct lustre_handle *lockh, int unref)
{
struct obd_device *obd = exp->exp_obd;
if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
RETURN(rc);
- if (data != NULL) {
+ if (obj != NULL) {
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
LASSERT(lock != NULL);
- if (!osc_set_lock_data(lock, data)) {
+ if (osc_set_lock_data(lock, obj)) {
+ lock_res_and_lock(lock);
+ if (!ldlm_is_lvb_cached(lock)) {
+ LASSERT(lock->l_ast_data == obj);
+ osc_lock_lvb_update(env, obj, lock, NULL);
+ ldlm_set_lvb_cached(lock);
+ }
+ unlock_res_and_lock(lock);
+ } else {
ldlm_lock_decref(lockh, rc);
rc = 0;
}
}
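
The rewritten match path now primes the lock's LVB exactly once on first use
and latches that fact in a lock flag, addressing the stale-LVB concern that
the deleted kms_valid comment in osc_enqueue_base() used to describe. The
flag helpers are assumed to be the usual generated accessors:

    /* assumed accessors, in the style of lustre_dlm_flags.h */
    static inline bool ldlm_is_lvb_cached(struct ldlm_lock *lock)
    {
            return lock->l_flags & LDLM_FL_LVB_CACHED;
    }

    static inline void ldlm_set_lvb_cached(struct ldlm_lock *lock)
    {
            lock->l_flags |= LDLM_FL_LVB_CACHED;
    }
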
req->rq_interpret_reply = osc_statfs_interpret;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
+ aa = ptlrpc_req_async_args(aa, req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
struct osc_grant_args *aa;
struct obdo *oa;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
+ aa = ptlrpc_req_async_args(aa, req);
OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
if (!oa) {
ptlrpc_req_finished(req);
}
EXPORT_SYMBOL(osc_cleanup_common);
-static struct obd_ops osc_obd_ops = {
+static const struct obd_ops osc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = osc_setup,
.o_precleanup = osc_precleanup,
};
static struct shrinker *osc_cache_shrinker;
-struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
+LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
#ifndef HAVE_SHRINKER_COUNT
.nr_to_scan = shrink_param(sc, nr_to_scan),
.gfp_mask = shrink_param(sc, gfp_mask)
};
-#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
- struct shrinker *shrinker = NULL;
-#endif
-
(void)osc_cache_shrink_scan(shrinker, &scv);
return osc_cache_shrink_count(shrinker, &scv);