#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
-#include <uapi/linux/lustre/lustre_param.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
}
static int osc_setattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_setattr_args *sa, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct ost_body *body;
- ENTRY;
+ struct osc_setattr_args *sa = args;
+ struct ost_body *body;
- if (rc != 0)
- GOTO(out, rc);
+ ENTRY;
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
&body->oa);
out:
- rc = sa->sa_upcall(sa->sa_cookie, rc);
- RETURN(rc);
+ rc = sa->sa_upcall(sa->sa_cookie, rc);
+ RETURN(rc);
}
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
/* Do not wait for response. */
ptlrpcd_add_req(req);
} else {
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_setattr_interpret;
+ req->rq_interpret_reply = osc_setattr_interpret;
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+ req->rq_interpret_reply = osc_setattr_interpret;
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
sa->sa_oa = oa;
EXPORT_SYMBOL(osc_punch_send);
static int osc_sync_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *arg, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct osc_fsync_args *fa = arg;
- struct ost_body *body;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned long valid = 0;
- struct cl_object *obj;
+ struct osc_fsync_args *fa = args;
+ struct ost_body *body;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ unsigned long valid = 0;
+ struct cl_object *obj;
ENTRY;
if (rc != 0)
}
static int osc_destroy_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data,
- int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
atomic_dec(&cli->cl_destroy_in_flight);
wake_up(&cli->cl_destroy_waitq);
+
return 0;
}
/* Do not ask for more than OBD_MAX_GRANT - a margin for server
* to add extent tax, etc.
*/
- oa->o_undirty = min(undirty, OBD_MAX_GRANT -
- (PTLRPC_MAX_BRW_PAGES << PAGE_SHIFT)*4UL);
+ oa->o_undirty = min(undirty, OBD_MAX_GRANT &
+ ~(PTLRPC_MAX_BRW_SIZE * 4UL));
}
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
static struct grant_thread_data client_gtd;
static int osc_shrink_grant_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *aa, int rc)
+ struct ptlrpc_request *req,
+ void *args, int rc)
{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
- struct ost_body *body;
+ struct osc_grant_args *aa = args;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ost_body *body;
- if (rc != 0) {
- __osc_update_grant(cli, oa->o_grant);
- GOTO(out, rc);
- }
+ if (rc != 0) {
+ __osc_update_grant(cli, aa->aa_oa->o_grant);
+ GOTO(out, rc);
+ }
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- osc_update_grant(cli, body);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ osc_update_grant(cli, body);
out:
- OBDO_FREE(oa);
- return rc;
+ OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
+
+ return rc;
}
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
osc_announce_cached(cli, &body->oa, 0);
spin_lock(&cli->cl_loi_list_lock);
+ if (target_bytes >= cli->cl_avail_grant) {
+ /* available grant has changed since target calculation */
+ spin_unlock(&cli->cl_loi_list_lock);
+ GOTO(out_free, rc = 0);
+ }
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
spin_unlock(&cli->cl_loi_list_lock);
sizeof(*body), body, NULL);
if (rc != 0)
__osc_update_grant(cli, body->oa.o_grant);
+out_free:
OBD_FREE_PTR(body);
RETURN(rc);
}
mutex_lock(&client_gtd.gtd_mutex);
list_for_each_entry(cli, &client_gtd.gtd_clients,
cl_grant_chain) {
- if (++rpc_sent < GRANT_SHRINK_RPC_BATCH &&
- osc_should_shrink_grant(cli))
+ if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
+ osc_should_shrink_grant(cli)) {
osc_shrink_grant(cli);
+ rpc_sent++;
+ }
if (!init_next_shrink) {
if (cli->cl_next_shrink_grant < next_shrink &&
if (client_gtd.gtd_stopped == 1)
return;
- if (next_shrink > ktime_get_seconds())
- schedule_delayed_work(&work, msecs_to_jiffies(
- (next_shrink - ktime_get_seconds()) *
- MSEC_PER_SEC));
- else
+ if (next_shrink > ktime_get_seconds()) {
+ time64_t delay = next_shrink - ktime_get_seconds();
+
+ schedule_delayed_work(&work, cfs_time_seconds(delay));
+ } else {
schedule_work(&work.work);
+ }
+}
+
+void osc_schedule_grant_work(void)
+{
+ cancel_delayed_work_sync(&work);
+ schedule_work(&work.work);
}
/**
/* return error if any niobuf was in error */
for (i = 0; i < niocount; i++) {
- if ((int)remote_rcs[i] < 0)
- return(remote_rcs[i]);
+ if ((int)remote_rcs[i] < 0) {
+ CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
+ i, remote_rcs[i], req);
+ return remote_rcs[i];
+ }
if (remote_rcs[i] != 0) {
CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
return (p1->off + p1->count == p2->off);
}
+#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
size_t pg_count, struct brw_page **pga,
int opc, obd_dif_csum_fn *fn,
int sector_size,
u32 *check_sum)
{
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
/* Used Adler as the default checksum type on top of DIF tags */
unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
struct page *__page;
if (__page == NULL)
return -ENOMEM;
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
- rc = PTR_ERR(hdesc);
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
+ rc = PTR_ERR(req);
CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
obd_name, cfs_crypto_hash_name(cfs_alg), rc);
GOTO(out, rc);
* The left guard number should be able to hold checksums of a
* whole page
*/
- rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg, 0,
+ rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
+ pga[i]->off & ~PAGE_MASK,
count,
guard_start + used_number,
guard_number - used_number,
used_number += used;
if (used_number == guard_number) {
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
used_number = 0;
}
GOTO(out, rc);
if (used_number != 0)
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
bufsize = sizeof(cksum);
- cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
__free_page(__page);
return rc;
}
+#else /* !CONFIG_CRC_T10DIF */
+#define obd_dif_ip_fn NULL
+#define obd_dif_crc_fn NULL
+#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum) \
+ -EOPNOTSUPP
+#endif /* CONFIG_CRC_T10DIF */
static int osc_checksum_bulk(int nob, size_t pg_count,
struct brw_page **pga, int opc,
u32 *cksum)
{
int i = 0;
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
LASSERT(pg_count > 0);
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("Unable to initialize checksum hash %s\n",
cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
while (nob > 0 && pg_count > 0) {
memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
kunmap(pga[i]->pg);
}
- cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
+ cfs_crypto_hash_update_page(req, pga[i]->pg,
pga[i]->off & ~PAGE_MASK,
count);
LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
}
bufsize = sizeof(*cksum);
- cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
+ cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
for (i = 0; i < page_count; i++)
short_io_size += pga[i]->count;
- /* Check if we can do a short io. */
- if (!(short_io_size <= cli->cl_short_io_bytes && niocount == 1 &&
- imp_connect_shortio(cli->cl_import)))
+ /* Check if read/write is small enough to be a short io. */
+ if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
+ !imp_connect_shortio(cli->cl_import))
short_io_size = 0;
req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
kunmap(pga[i]->pg);
}
- rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+ rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
if (rc)
CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
filp_close(filp, NULL);
enum cksum_types cksum_type;
obd_dif_csum_fn *fn = NULL;
int sector_size = 0;
- bool t10pi = false;
__u32 new_cksum;
char *msg;
int rc;
switch (cksum_type) {
case OBD_CKSUM_T10IP512:
- t10pi = true;
fn = obd_dif_ip_fn;
sector_size = 512;
break;
case OBD_CKSUM_T10IP4K:
- t10pi = true;
fn = obd_dif_ip_fn;
sector_size = 4096;
break;
case OBD_CKSUM_T10CRC512:
- t10pi = true;
fn = obd_dif_crc_fn;
sector_size = 512;
break;
case OBD_CKSUM_T10CRC4K:
- t10pi = true;
fn = obd_dif_crc_fn;
sector_size = 4096;
break;
break;
}
- if (t10pi)
+ if (fn)
rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
- aa->aa_page_count,
- aa->aa_ppga,
- OST_WRITE,
- fn,
- sector_size,
+ aa->aa_page_count, aa->aa_ppga,
+ OST_WRITE, fn, sector_size,
&new_cksum);
else
rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
CDEBUG(D_QUOTA, "setdq for [%u %u %u] with valid %#llx, flags %x\n",
body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
body->oa.o_valid, body->oa.o_flags);
- osc_quota_setdq(cli, qid, body->oa.o_valid,
+ osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
body->oa.o_flags);
}
}
}
}
- /* New request takes over pga and oaps from old request.
- * Note that copying a list_head doesn't work, need to move it... */
- aa->aa_resends++;
- new_req->rq_interpret_reply = request->rq_interpret_reply;
- new_req->rq_async_args = request->rq_async_args;
+ /*
+ * New request takes over pga and oaps from old request.
+ * Note that copying a list_head doesn't work, need to move it...
+ */
+ aa->aa_resends++;
+ new_req->rq_interpret_reply = request->rq_interpret_reply;
+ new_req->rq_async_args = request->rq_async_args;
new_req->rq_commit_cb = request->rq_commit_cb;
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply()) */
}
static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct osc_brw_async_args *aa = data;
+ struct osc_brw_async_args *aa = args;
struct osc_extent *ext;
struct osc_extent *tmp;
struct client_obd *cli = aa->aa_cli;
- unsigned long transferred = 0;
- ENTRY;
+ unsigned long transferred = 0;
+
+ ENTRY;
- rc = osc_brw_fini_request(req, rc);
- CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
- /* When server return -EINPROGRESS, client should always retry
- * regardless of the number of times the bulk was resent already. */
+ rc = osc_brw_fini_request(req, rc);
+ CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
+ /*
+ * When server returns -EINPROGRESS, client should always retry
+ * regardless of the number of times the bulk was resent already.
+ */
if (osc_recoverable_error(rc) && !req->rq_no_delay) {
if (req->rq_import_generation !=
req->rq_import->imp_generation) {
cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
- OBDO_FREE(aa->aa_oa);
+ OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
osc_inc_unstable_pages(req);
if (pga == NULL)
GOTO(out, rc = -ENOMEM);
- OBDO_ALLOC(oa);
+ OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
if (oa == NULL)
GOTO(out, rc = -ENOMEM);
LASSERT(req == NULL);
if (oa)
- OBDO_FREE(oa);
+ OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
if (pga)
OBD_FREE(pga, sizeof(*pga) * page_count);
/* this should happen rarely and is pretty bad, it makes the
}
int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
- struct osc_enqueue_args *aa, int rc)
+ void *args, int rc)
{
+ struct osc_enqueue_args *aa = args;
struct ldlm_lock *lock;
struct lustre_handle *lockh = &aa->oa_lockh;
enum ldlm_mode mode = aa->oa_mode;
aa->oa_flags = NULL;
}
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_enqueue_interpret;
+ req->rq_interpret_reply = osc_enqueue_interpret;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req);
else
}
static int osc_statfs_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct obd_statfs *msfs;
- ENTRY;
+ struct osc_async_args *aa = args;
+ struct obd_statfs *msfs;
- if (rc == -EBADR)
- /* The request has in fact never been sent
- * due to issues at a higher level (LOV).
- * Exit immediately since the caller is
- * aware of the problem and takes care
- * of the clean up */
- RETURN(rc);
+ ENTRY;
+ if (rc == -EBADR)
+ /*
+ * The request has in fact never been sent due to issues at
+ * a higher level (LOV). Exit immediately since the caller
+ * is aware of the problem and takes care of the clean up.
+ */
+ RETURN(rc);
- if ((rc == -ENOTCONN || rc == -EAGAIN) &&
- (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
- GOTO(out, rc = 0);
+ if ((rc == -ENOTCONN || rc == -EAGAIN) &&
+ (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
+ GOTO(out, rc = 0);
- if (rc != 0)
- GOTO(out, rc);
+ if (rc != 0)
+ GOTO(out, rc);
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+ if (msfs == NULL)
GOTO(out, rc = -EPROTO);
- }
- *aa->aa_oi->oi_osfs = *msfs;
+ *aa->aa_oi->oi_osfs = *msfs;
out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- RETURN(rc);
+ rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+
+ RETURN(rc);
}
static int osc_statfs_async(struct obd_export *exp,
int rc;
ENTRY;
+ if (obd->obd_osfs_age >= max_age) {
+ CDEBUG(D_SUPER,
+ "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
+ obd->obd_name, &obd->obd_osfs,
+ obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
+ obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
+ spin_unlock(&obd->obd_osfs_lock);
+ oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
+ if (oinfo->oi_cb_up)
+ oinfo->oi_cb_up(oinfo, 0);
+
+ RETURN(0);
+ }
+
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
* extra calls into the filesystem if that isn't necessary (e.g.
req->rq_no_delay = 1;
}
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
+ req->rq_interpret_reply = osc_statfs_interpret;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
/*
 * ioctl dispatcher for the OSC device.
 *
 * Handles client-recovery and import-activation requests against the
 * OSC's import; everything else is rejected with -ENOTTY.  A module
 * reference is held across the operation so the module cannot be
 * unloaded while an ioctl is in flight.
 *
 * Returns 0 on success or a negative errno.
 */
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void __user *uarg)
{
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	int rc = 0;

	ENTRY;
	if (!try_module_get(THIS_MODULE)) {
		CERROR("%s: cannot get module '%s'\n", obd->obd_name,
		       module_name(THIS_MODULE));
		return -EINVAL;
	}
	switch (cmd) {
	case OBD_IOC_CLIENT_RECOVER:
		rc = ptlrpc_recover_import(obd->u.cli.cl_import,
					   data->ioc_inlbuf1, 0);
		/* Positive return means recovery in progress; treat as OK. */
		if (rc > 0)
			rc = 0;
		break;
	case IOC_OSC_SET_ACTIVE:
		rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
					      data->ioc_offset);
		break;
	default:
		rc = -ENOTTY;
		CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
		       obd->obd_name, cmd, current_comm(), rc);
		break;
	}

	module_put(THIS_MODULE);
	return rc;
}
int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
RETURN(0);
}
- if (KEY_IS(KEY_CACHE_SET)) {
- struct client_obd *cli = &obd->u.cli;
-
- LASSERT(cli->cl_cache == NULL); /* only once */
- cli->cl_cache = (struct cl_client_cache *)val;
- cl_cache_incref(cli->cl_cache);
- cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
-
- /* add this osc into entity list */
- LASSERT(list_empty(&cli->cl_lru_osc));
- spin_lock(&cli->cl_cache->ccc_lru_lock);
- list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
- spin_unlock(&cli->cl_cache->ccc_lru_lock);
-
- RETURN(0);
- }
-
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- OBDO_ALLOC(oa);
+ OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
if (!oa) {
ptlrpc_req_finished(req);
RETURN(-ENOMEM);
spin_lock(&cli->cl_loi_list_lock);
grant = cli->cl_avail_grant + cli->cl_reserved_grant;
- if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM)
+ if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
+ /* restore ocd_grant_blkbits as client page bits */
+ data->ocd_grant_blkbits = PAGE_SHIFT;
grant += cli->cl_dirty_grant;
- else
+ } else {
grant += cli->cl_dirty_pages << PAGE_SHIFT;
+ }
data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
* Cancel all unused and granted extent lock.
*/
if (lock->l_resource->lr_type == LDLM_EXTENT &&
- lock->l_granted_mode == lock->l_req_mode &&
+ ldlm_is_granted(lock) &&
osc_ldlm_weigh_ast(lock) == 0)
RETURN(1);
list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
spin_unlock(&osc_shrink_lock);
cli->cl_import->imp_idle_timeout = osc_idle_timeout;
+ cli->cl_import->imp_idle_debug = D_HA;
RETURN(0);
}
}
EXPORT_SYMBOL(osc_cleanup_common);
-int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- ssize_t count = class_modify_config(lcfg, PARAM_OSC,
- &obd->obd_kset.kobj);
- return count > 0 ? 0 : count;
-}
-
-static int osc_process_config(struct obd_device *obd, size_t len, void *buf)
-{
- return osc_process_config_base(obd, buf);
-}
-
static struct obd_ops osc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = osc_setup,
.o_cleanup = osc_cleanup_common,
.o_add_conn = client_import_add_conn,
.o_del_conn = client_import_del_conn,
- .o_connect = client_connect_import,
+ .o_connect = client_connect_import,
.o_reconnect = osc_reconnect,
.o_disconnect = osc_disconnect,
.o_statfs = osc_statfs,
.o_iocontrol = osc_iocontrol,
.o_set_info_async = osc_set_info_async,
.o_import_event = osc_import_event,
- .o_process_config = osc_process_config,
.o_quotactl = osc_quotactl,
};
static int __init osc_init(void)
{
- bool enable_proc = true;
- struct obd_type *type;
unsigned int reqpool_size;
unsigned int reqsize;
int rc;
if (rc)
RETURN(rc);
- type = class_search_type(LUSTRE_OSP_NAME);
- if (type != NULL && type->typ_procsym != NULL)
- enable_proc = false;
-
- rc = class_register_type(&osc_obd_ops, NULL, enable_proc, NULL,
+ rc = class_register_type(&osc_obd_ops, NULL, true, NULL,
LUSTRE_OSC_NAME, &osc_device_type);
if (rc)
GOTO(out_kmem, rc);