* GPL HEADER END
*/
/*
- * Copyright (c) 2013, Intel Corporation.
+ * Copyright (c) 2013, 2014, Intel Corporation.
*
* Copyright 2012 Xyratex Technology Limited
*/
struct nrs_orr_req_range *range)
{
/* Should we do this at page boundaries ? */
- range->or_start = nb[0].offset & CFS_PAGE_MASK;
- range->or_end = (nb[niocount - 1].offset +
- nb[niocount - 1].len - 1) | ~CFS_PAGE_MASK;
+ range->or_start = nb[0].rnb_offset & CFS_PAGE_MASK;
+ range->or_end = (nb[niocount - 1].rnb_offset +
+ nb[niocount - 1].rnb_len - 1) | ~CFS_PAGE_MASK;
}
/**
return cfs_hash_djb2_hash(key, sizeof(struct nrs_orr_key), mask);
}
-static void *nrs_orr_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_orr_hop_key(struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
return &orro->oo_key;
}
-static int nrs_orr_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_orr_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
&((struct nrs_orr_key *)key)->ok_fid);
}
-static void *nrs_orr_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_orr_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
+ return hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
}
-static void nrs_orr_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
orro->oo_ref++;
* Removes an nrs_orr_object the hash and frees its memory, if the object has
* no active users.
*/
-static void nrs_orr_hop_put_free(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_put_free(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
}
-static void nrs_orr_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_orr_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
orro->oo_ref--;
}
-static int nrs_trr_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_trr_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
return orro->oo_key.ok_idx == ((struct nrs_orr_key *)key)->ok_idx;
}
-static void nrs_trr_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_trr_hop_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct nrs_orr_object *orro = cfs_hlist_entry(hnode,
+ struct nrs_orr_object *orro = hlist_entry(hnode,
struct nrs_orr_object,
oo_hnode);
struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
* \retval -ENOMEM OOM error
* \retval 0 success
*/
-static int nrs_orr_start(struct ptlrpc_nrs_policy *policy)
+static int nrs_orr_start(struct ptlrpc_nrs_policy *policy, char *arg)
{
struct nrs_orr_data *orrd;
cfs_hash_ops_t *ops;
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried successfully
* \retval -ve error
*/
-int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
- void *arg)
+static int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg)
{
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch((enum nrs_ctl_orr)opc) {
default:
*
* \see nrs_resource_get_safe()
*/
-int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
+static int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp, bool moving_req)
{
struct nrs_orr_data *orrd;
struct nrs_orr_object *orro;
OBD_SLAB_CPT_ALLOC_PTR_GFP(orro, orrd->od_cache,
nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- moving_req ? GFP_ATOMIC : __GFP_IO);
+ moving_req ? GFP_ATOMIC : GFP_NOFS);
if (orro == NULL)
RETURN(-ENOMEM);
* so that lprocfs read/write functions can be used by both the ORR and TRR
* policies.
*/
-struct nrs_lprocfs_orr_data {
+static struct nrs_lprocfs_orr_data {
struct ptlrpc_service *svc;
char *name;
} lprocfs_orr_data = {
}
LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_orr_supported);
-int nrs_orr_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_orr_lprocfs_init(struct ptlrpc_service *svc)
{
int i;
return lprocfs_seq_add_vars(svc->srv_procroot, nrs_orr_lprocfs_vars, NULL);
}
-void nrs_orr_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_orr_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
#ifdef LPROCFS
-int nrs_trr_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_trr_lprocfs_init(struct ptlrpc_service *svc)
{
int rc;
int i;
return rc;
}
-void nrs_trr_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_trr_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;