if (rc != 0)
GOTO(out, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
if (body) {
CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
int rc;
ENTRY;
- LASSERTF(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
- CHECK_MDS_GROUP(oinfo->oi_oa->o_gr),
- "oinfo->oi_oa->o_valid="LPU64" oinfo->oi_oa->o_gr="LPU64"\n",
- oinfo->oi_oa->o_valid, oinfo->oi_oa->o_gr);
+ LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (req == NULL)
RETURN(rc);
}
+ if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
+
osc_pack_req_body(req, oinfo);
ptlrpc_request_set_replen(req);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
- }
-
/* do mds to ost setattr asynchronously */
if (!rqset) {
/* Do not wait for response. */
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
} else if (atomic_read(&obd_dirty_pages) -
- atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages){
+ atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages + 1){
+        /* The atomic_read() and the atomic_inc() are not covered by a
+         * lock, so they may safely race and trip this CERROR() unless
+         * we add in a small fudge factor (+1). */
CERROR("dirty %d - %d > system dirty_max %d\n",
atomic_read(&obd_dirty_pages),
atomic_read(&obd_dirty_transit_pages),
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
+ LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += CFS_PAGE_SIZE;
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
ENTRY;
- LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
+ LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
return;
static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
+ /*
+        * ocd_grant is the total grant amount we're expected to hold: if we've
+ * been evicted, it's the new avail_grant amount, cl_dirty will drop
+ * to 0 as inflight RPCs fail out; otherwise, it's avail_grant + dirty.
+ *
+        * A race is tolerable here: if we're evicted but imp_state has
+        * already left the EVICTED state, then cl_dirty must be 0 already.
+ */
client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant = ocd->ocd_grant;
+ if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
+ cli->cl_avail_grant = ocd->ocd_grant;
+ else
+ cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
client_obd_list_unlock(&cli->cl_loi_list_lock);
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
- osc_add_shrink_grant(cli);
-
CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
cli->cl_avail_grant, cli->cl_lost_grant);
LASSERT(cli->cl_avail_grant >= 0);
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
+ list_empty(&cli->cl_grant_shrink_list))
+ osc_add_shrink_grant(cli);
}
/* We assume that the reason this OSC got a short read is because it read
int requested_nob, int niocount,
obd_count page_count, struct brw_page **pga)
{
- int *remote_rcs, i;
+ int i;
+ __u32 *remote_rcs;
- /* return error if any niobuf was in error */
- remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1,
- sizeof(*remote_rcs) * niocount, NULL);
+ remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
+ sizeof(*remote_rcs) *
+ niocount);
if (remote_rcs == NULL) {
CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
return(-EPROTO);
}
- if (lustre_msg_swabbed(req->rq_repmsg))
- for (i = 0; i < niocount; i++)
- __swab32s(&remote_rcs[i]);
+ /* return error if any niobuf was in error */
for (i = 0; i < niocount; i++) {
if (remote_rcs[i] < 0)
return(remote_rcs[i]);
}
pill = &req->rq_pill;
+ req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
+ sizeof(*ioobj));
req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
niocount * sizeof(*niobuf));
osc_set_capa_size(req, &RMF_CAPA1, ocapa);
body = req_capsule_client_get(pill, &RMF_OST_BODY);
ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
- LASSERT(body && ioobj && niobuf);
+ LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
lustre_set_wire_obdo(&body->oa, oa);
}
LASSERTF((void *)(niobuf - niocount) ==
- lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
- niocount * sizeof(*niobuf)),
- "want %p - real %p\n", lustre_msg_buf(req->rq_reqmsg,
- REQ_REC_OFF + 2, niocount * sizeof(*niobuf)),
- (void *)(niobuf - niocount));
+ req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
+ "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
+ &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
if (osc_should_shrink_grant(cli))
}
oa->o_cksum = body->oa.o_cksum;
/* 1 RC per niobuf */
- req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER,
+ req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
sizeof(__u32) * niocount);
} else {
if (unlikely(cli->cl_checksum) &&
body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
}
- req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
+ req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER, 0);
/* 1 RC for the whole I/O */
}
ptlrpc_request_set_replen(req);
RETURN(rc);
LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
if (body == NULL) {
CDEBUG(D_INFO, "Can't unpack body\n");
RETURN(-EPROTO);
oap->oap_request = NULL;
}
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
+ spin_unlock(&oap->oap_lock);
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE) {
int i;
for (i = 0; i < aa->aa_page_count; i++)
osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
-
+
if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
OBDO_FREE(aa->aa_oa);
}
/**
* prepare pages for ASYNC io and put pages in send queue.
*
- * \param cli -
- * \param loi -
- * \param cmd - OBD_BRW_* macroses
- * \param lop - pending pages
+ * \param cmd OBD_BRW_* macroses
+ * \param lop pending pages
*
 * \return zero if the pages were successfully added to the send queue.
 * \return non-zero if an error occurred.
struct osc_brw_async_args *aa;
const struct obd_async_page_ops *ops;
CFS_LIST_HEAD(rpc_list);
+ CFS_LIST_HEAD(tmp_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
int srvlock = 0;
struct cl_object *clob = NULL;
ENTRY;
- /* If there are HP OAPs we need to handle at least 1 of them,
- * move it the beginning of the pending list for that. */
- if (!list_empty(&lop->lop_urgent)) {
- oap = list_entry(lop->lop_urgent.next,
- struct osc_async_page, oap_urgent_item);
- if (oap->oap_async_flags & ASYNC_HP)
- list_move(&oap->oap_pending_item, &lop->lop_pending);
+        /* ASYNC_HP pages first. At present, when the lock covering the pages
+         * is to be canceled, the pages covered by the lock will be sent out
+         * with ASYNC_HP. We have to send them out as soon as possible. */
+ list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
+ if (oap->oap_async_flags & ASYNC_HP)
+ list_move(&oap->oap_pending_item, &tmp_list);
+ else
+ list_move_tail(&oap->oap_pending_item, &tmp_list);
+ if (++page_count >= cli->cl_max_pages_per_rpc)
+ break;
}
+ list_splice(&tmp_list, &lop->lop_pending);
+ page_count = 0;
+
/* first we find the pages we're allowed to work with */
list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
oap_pending_item) {
oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
break;
}
+
+ /* If there is a gap at the start of this page, it can't merge
+ * with any previous page, so we'll hand the network a
+ * "fragmented" page array that it can't transfer in 1 RDMA */
+ if (page_count != 0 && oap->oap_page_off != 0)
+ break;
+
/* in llite being 'ready' equates to the page being locked
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
case -EINTR:
/* the io isn't needed.. tell the checks
* below to complete the rpc with EINTR */
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
oap->oap_count = -EINTR;
break;
case 0:
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY;
+ spin_unlock(&oap->oap_lock);
break;
default:
LASSERTF(0, "oap %p page %p returned %d "
}
}
#endif
- /* If there is a gap at the start of this page, it can't merge
- * with any previous page, so we'll hand the network a
- * "fragmented" page array that it can't transfer in 1 RDMA */
- if (page_count != 0 && oap->oap_page_off != 0)
- break;
/* take the page out of our book-keeping */
list_del_init(&oap->oap_pending_item);
}
if (!hprpc && !list_empty(&loi->loi_read_lop.lop_urgent)) {
- oap = list_entry(loi->loi_write_lop.lop_urgent.next,
+ oap = list_entry(loi->loi_read_lop.lop_urgent.next,
struct osc_async_page, oap_urgent_item);
hprpc = !!(oap->oap_async_flags & ASYNC_HP);
}
if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
&loi->loi_write_lop);
- if (rc < 0)
- break;
+ if (rc < 0) {
+ CERROR("Write request failed with %d\n", rc);
+
+ /* osc_send_oap_rpc failed, mostly because of
+ * memory pressure.
+ *
+                        * We can't break out here, because if:
+ * - a page was submitted by osc_io_submit, so
+ * page locked;
+ * - no request in flight
+ * - no subsequent request
+ * The system will be in live-lock state,
+ * because there is no chance to call
+ * osc_io_unplug() and osc_check_rpcs() any
+ * more. pdflush can't help in this case,
+ * because it might be blocked at grabbing
+ * the page lock as we mentioned.
+ *
+ * Anyway, continue to drain pages. */
+ /* break; */
+ }
+
if (rc > 0)
race_counter = 0;
else
rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
&loi->loi_read_lop);
if (rc < 0)
- break;
+ CERROR("Read request failed with %d\n", rc);
+
if (rc > 0)
race_counter = 0;
else
oap->oap_page_off = off;
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
+ /* Give a hint to OST that requests are coming from kswapd - bug19529 */
+ if (libcfs_memory_pressure_get())
+ oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = async_flags;
+ spin_unlock(&oap->oap_lock);
if (cmd & OBD_BRW_WRITE) {
rc = osc_enter_cache(env, cli, loi, oap);
obd_flag async_flags)
{
struct loi_oap_pages *lop;
+ int flags = 0;
ENTRY;
LASSERT(!list_empty(&oap->oap_pending_item));
RETURN(0);
if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
- oap->oap_async_flags |= ASYNC_READY;
+ flags |= ASYNC_READY;
if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
list_empty(&oap->oap_rpc_item)) {
list_add(&oap->oap_urgent_item, &lop->lop_urgent);
else
list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
- oap->oap_async_flags |= ASYNC_URGENT;
+ flags |= ASYNC_URGENT;
loi_list_maint(cli, loi);
}
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= flags;
+ spin_unlock(&oap->oap_lock);
LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
oap->oap_async_flags);
if (!list_empty(&oap->oap_urgent_item)) {
list_del_init(&oap->oap_urgent_item);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
+ spin_unlock(&oap->oap_lock);
}
if (!list_empty(&oap->oap_pending_item)) {
list_del_init(&oap->oap_pending_item);
/* Complete obtaining the lock procedure. */
rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
mode, aa->oa_flags, aa->oa_lvb,
- sizeof(*aa->oa_lvb), lustre_swab_ost_lvb,
- &handle, rc);
+ sizeof(*aa->oa_lvb), &handle, rc);
/* Complete osc stuff. */
rc = osc_enqueue_fini(req, aa->oa_lvb,
aa->oa_upcall, aa->oa_cookie, aa->oa_flags, rc);
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), lustre_swab_ost_lvb, lockh, async);
+ sizeof(*lvb), lockh, async);
if (rqset) {
if (!rc) {
struct osc_enqueue_args *aa;
rc = mode;
if (mode == LCK_PR)
rc |= LCK_PW;
- rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
+ rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
if (rc) {
if (data != NULL)
struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
struct obd_statfs *msfs;
+ __u64 used;
ENTRY;
+ if (rc == -EBADR)
+ /* The request has in fact never been sent
+ * due to issues at a higher level (LOV).
+ * Exit immediately since the caller is
+ * aware of the problem and takes care
+ * of the clean up */
+ RETURN(rc);
+
if ((rc == -ENOTCONN || rc == -EAGAIN) &&
(aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
GOTO(out, rc = 0);
GOTO(out, rc = -EPROTO);
}
+ /* Reinitialize the RDONLY and DEGRADED flags at the client
+ * on each statfs, so they don't stay set permanently. */
+ spin_lock(&cli->cl_oscc.oscc_lock);
+
+ if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
+ cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
+ else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
+ cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
+
+ if (unlikely(msfs->os_state & OS_STATE_READONLY))
+ cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
+ else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
+ cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
+
+ /* Add a bit of hysteresis so this flag isn't continually flapping,
+ * and ensure that new files don't get extremely fragmented due to
+ * only a small amount of available space in the filesystem.
+ * We want to set the NOSPC flag when there is less than ~0.1% free
+ * and clear it when there is at least ~0.2% free space, so:
+ * avail < ~0.1% max max = avail + used
+ * 1025 * avail < avail + used used = blocks - free
+ * 1024 * avail < used
+ * 1024 * avail < blocks - free
+ * avail < ((blocks - free) >> 10)
+ *
+        * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want
+        * to lose that amount of space, so in those cases we report no space
+        * left if there is less than 1 GB left. */
+ used = min_t(__u64,(msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
+ if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
+ ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
+ cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
+ else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
+ (msfs->os_ffree > 64) && (msfs->os_bavail > (used << 1))))
+ cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_NOSPC;
+
+ spin_unlock(&cli->cl_oscc.oscc_lock);
+
*aa->aa_oi->oi_osfs = *msfs;
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
if (KEY_IS(KEY_NEXT_ID)) {
+ obd_id new_val;
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
+
if (vallen != sizeof(obd_id))
RETURN(-ERANGE);
if (val == NULL)
RETURN(-EINVAL);
- obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
+
+ if (vallen != sizeof(obd_id))
+ RETURN(-EINVAL);
+
+                /* avoid a race between allocating a new object and setting
+                 * the next id from the ll_sync thread */
+ spin_lock(&oscc->oscc_lock);
+ new_val = *((obd_id*)val) + 1;
+ if (new_val > oscc->oscc_next_id)
+ oscc->oscc_next_id = new_val;
+ spin_unlock(&oscc->oscc_lock);
CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
exp->exp_obd->obd_name,
obd->u.cli.cl_oscc.oscc_next_id);
RETURN(0);
}
- if (KEY_IS(KEY_UNLINKED)) {
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
- spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
- spin_unlock(&oscc->oscc_lock);
- RETURN(0);
- }
-
if (KEY_IS(KEY_INIT_RECOV)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
if (KEY_IS(KEY_GRANT_SHRINK))
req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
else
- req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO);
+ req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
if (req == NULL)
RETURN(-ENOMEM);
};
static struct llog_operations osc_mds_ost_orig_logops;
-static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
- struct obd_device *tgt, int count,
- struct llog_catid *catid, struct obd_uuid *uuid)
+
+static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, struct llog_catid *catid)
{
int rc;
ENTRY;
- LASSERT(olg == &obd->obd_olg);
- spin_lock(&obd->obd_dev_lock);
- if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
- osc_mds_ost_orig_logops = llog_lvfs_ops;
- osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
- osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
- osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
- osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
- }
- spin_unlock(&obd->obd_dev_lock);
-
- rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
+ rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
&catid->lci_logid, &osc_mds_ost_orig_logops);
if (rc) {
CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
GOTO(out, rc);
}
- rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
+ rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
NULL, &osc_size_repl_logops);
if (rc) {
struct llog_ctxt *ctxt =
GOTO(out, rc);
out:
if (rc) {
- CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
- obd->obd_name, tgt->obd_name, count, catid, rc);
+ CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
+ obd->obd_name, tgt->obd_name, catid, rc);
CERROR("logid "LPX64":0x%x\n",
catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
}
return rc;
}
+static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *disk_obd, int *index)
+{
+ struct llog_catid catid;
+ static char name[32] = CATLIST;
+ int rc;
+ ENTRY;
+
+ LASSERT(olg == &obd->obd_olg);
+
+ mutex_down(&olg->olg_cat_processing);
+ rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
+ if (rc) {
+ CERROR("rc: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
+ obd->obd_name, *index, catid.lci_logid.lgl_oid,
+ catid.lci_logid.lgl_ogr, catid.lci_logid.lgl_ogen);
+
+ rc = __osc_llog_init(obd, olg, disk_obd, &catid);
+ if (rc) {
+ CERROR("rc: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
+ if (rc) {
+ CERROR("rc: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ out:
+ mutex_up(&olg->olg_cat_processing);
+
+ return rc;
+}
+
static int osc_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
long lost_grant;
client_obd_list_lock(&cli->cl_loi_list_lock);
- data->ocd_grant = cli->cl_avail_grant ?:
+ data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
- "cl_lost_grant: %ld\n", data->ocd_grant,
- cli->cl_avail_grant, lost_grant);
+ "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
+ cli->cl_avail_grant, cli->cl_dirty, lost_grant);
CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
" ocd_grant: %d\n", data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
int osc_cleanup(struct obd_device *obd)
{
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
int rc;
ENTRY;
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
- spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
- oscc->oscc_flags |= OSCC_FLAG_EXITING;
- spin_unlock(&oscc->oscc_lock);
-
/* free memory of osc quota cache */
lquota_cleanup(quota_interface, obd);
.o_unpackmd = osc_unpackmd,
.o_precreate = osc_precreate,
.o_create = osc_create,
+ .o_create_async = osc_create_async,
.o_destroy = osc_destroy,
.o_getattr = osc_getattr,
.o_getattr_async = osc_getattr_async,
spin_lock_init(&osc_ast_guard);
lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+ osc_mds_ost_orig_logops = llog_lvfs_ops;
+ osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
+ osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
+ osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
+ osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
+
RETURN(rc);
}