* GPL HEADER END
*/
/*
- * Copyright (c) 2011 Intel Corporation
+ * Copyright (c) 2011, 2014, Intel Corporation.
*
* Copyright 2012 Xyratex Technology Limited
*/
#ifdef HAVE_SERVER_SUPPORT
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
* \retval 0 e1 > e2
* \retval 1 e1 <= e2
*/
-static int crrn_req_compare(cfs_binheap_node_t *e1, cfs_binheap_node_t *e2)
+static int
+crrn_req_compare(struct cfs_binheap_node *e1, struct cfs_binheap_node *e2)
{
struct ptlrpc_nrs_request *nrq1;
struct ptlrpc_nrs_request *nrq2;
return nrq1->nr_u.crr.cr_sequence < nrq2->nr_u.crr.cr_sequence;
}
-static cfs_binheap_ops_t nrs_crrn_heap_ops = {
+static struct cfs_binheap_ops nrs_crrn_heap_ops = {
.hop_enter = NULL,
.hop_exit = NULL,
.hop_compare = crrn_req_compare,
#define NRS_NID_BKT_BITS 8
#define NRS_NID_BITS 16
-static unsigned nrs_crrn_hop_hash(cfs_hash_t *hs, const void *key,
+static unsigned nrs_crrn_hop_hash(struct cfs_hash *hs, const void *key,
unsigned mask)
{
return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
}
-static int nrs_crrn_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int nrs_crrn_hop_keycmp(const void *key, struct hlist_node *hnode)
{
lnet_nid_t *nid = (lnet_nid_t *)key;
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
return *nid == cli->cc_nid;
}
-static void *nrs_crrn_hop_key(cfs_hlist_node_t *hnode)
+static void *nrs_crrn_hop_key(struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
return &cli->cc_nid;
}
-static void *nrs_crrn_hop_object(cfs_hlist_node_t *hnode)
+static void *nrs_crrn_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct nrs_crrn_client, cc_hnode);
+ return hlist_entry(hnode, struct nrs_crrn_client, cc_hnode);
}
-static void nrs_crrn_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_inc(&cli->cc_ref);
+ atomic_inc(&cli->cc_ref);
}
-static void nrs_crrn_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_dec(&cli->cc_ref);
+ atomic_dec(&cli->cc_ref);
}
-static void nrs_crrn_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void nrs_crrn_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
- struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
+ struct nrs_crrn_client *cli = hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- LASSERTF(cfs_atomic_read(&cli->cc_ref) == 0,
+ LASSERTF(atomic_read(&cli->cc_ref) == 0,
"Busy CRR-N object from client with NID %s, with %d refs\n",
- libcfs_nid2str(cli->cc_nid), cfs_atomic_read(&cli->cc_ref));
+ libcfs_nid2str(cli->cc_nid), atomic_read(&cli->cc_ref));
OBD_FREE_PTR(cli);
}
-static cfs_hash_ops_t nrs_crrn_hash_ops = {
+static struct cfs_hash_ops nrs_crrn_hash_ops = {
.hs_hash = nrs_crrn_hop_hash,
.hs_keycmp = nrs_crrn_hop_keycmp,
.hs_key = nrs_crrn_hop_key,
* \retval -ENOMEM OOM error
* \retval 0 success
*/
-static int nrs_crrn_start(struct ptlrpc_nrs_policy *policy)
+static int nrs_crrn_start(struct ptlrpc_nrs_policy *policy, char *arg)
{
struct nrs_crrn_net *net;
int rc = 0;
* with the default max_rpcs_in_flight value, as we are scheduling over
* NIDs, and there may be more than one mount point per client.
*/
- net->cn_quantum = OSC_MAX_RIF_DEFAULT;
+ net->cn_quantum = OBD_MAX_RIF_DEFAULT;
/**
* Set to 1 so that the test inside nrs_crrn_req_add() can evaluate to
* true.
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried out successfully
* \retval -ve error
*/
-int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
- void *arg)
+static int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc,
+ void *arg)
{
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch((enum nrs_ctl_crr)opc) {
default:
*
* \see nrs_resource_get_safe()
*/
-int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
+static int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp, bool moving_req)
{
struct nrs_crrn_net *net;
struct nrs_crrn_client *cli;
goto out;
OBD_CPT_ALLOC_GFP(cli, nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- sizeof(*cli), moving_req ? CFS_ALLOC_ATOMIC :
- CFS_ALLOC_IO);
+ sizeof(*cli), moving_req ? GFP_ATOMIC : GFP_NOFS);
if (cli == NULL)
return -ENOMEM;
cli->cc_nid = req->rq_peer.nid;
- cfs_atomic_set(&cli->cc_ref, 1);
+ atomic_set(&cli->cc_ref, 1);
tmp = cfs_hash_findadd_unique(net->cn_cli_hash, &cli->cc_nid,
&cli->cc_hnode);
if (tmp != cli) {
bool peek, bool force)
{
struct nrs_crrn_net *net = policy->pol_private;
- cfs_binheap_node_t *node = cfs_binheap_root(net->cn_binheap);
+ struct cfs_binheap_node *node = cfs_binheap_root(net->cn_binheap);
struct ptlrpc_nrs_request *nrq;
nrq = unlikely(node == NULL) ? NULL :
*/
if (unlikely(is_root)) {
/** Peek at the next request to be served */
- cfs_binheap_node_t *node = cfs_binheap_root(net->cn_binheap);
+ struct cfs_binheap_node *node = cfs_binheap_root(net->cn_binheap);
/** No more requests */
if (unlikely(node == NULL)) {
libcfs_id2str(req->rq_peer), nrq->nr_u.crr.cr_round);
}
-#ifdef LPROCFS
+#ifdef CONFIG_PROC_FS
/**
* lprocfs interface
* reg_quantum:8
* hp_quantum:4
*/
-static int ptlrpc_lprocfs_rd_nrs_crrn_quantum(char *page, char **start,
- off_t off, int count, int *eof,
- void *data)
+static int
+ptlrpc_lprocfs_nrs_crrn_quantum_seq_show(struct seq_file *m, void *data)
{
- struct ptlrpc_service *svc = data;
- __u16 quantum;
- int rc;
- int rc2 = 0;
+ struct ptlrpc_service *svc = m->private;
+ __u16 quantum;
+ int rc;
/**
* Perform two separate calls to this as only one of the NRS heads'
NRS_CTL_CRRN_RD_QUANTUM,
true, &quantum);
if (rc == 0) {
- *eof = 1;
- rc2 = snprintf(page, count, NRS_LPROCFS_QUANTUM_NAME_REG
- "%-5d\n", quantum);
+ seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_REG
+ "%-5d\n", quantum);
/**
* Ignore -ENODEV as the regular NRS head's policy may be in the
* ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
NRS_CTL_CRRN_RD_QUANTUM,
true, &quantum);
if (rc == 0) {
- *eof = 1;
- rc2 += snprintf(page + rc2, count - rc2,
- NRS_LPROCFS_QUANTUM_NAME_HP"%-5d\n", quantum);
+ seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_HP"%-5d\n", quantum);
/**
* Ignore -ENODEV as the high priority NRS head's policy may be
* in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
}
no_hp:
-
- return rc2 ? : rc;
+ return rc;
}
/**
* policy instances in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state
* are skipped later by nrs_crrn_ctl().
*/
-static int ptlrpc_lprocfs_wr_nrs_crrn_quantum(struct file *file,
- const char *buffer,
- unsigned long count, void *data)
+static ssize_t
+ptlrpc_lprocfs_nrs_crrn_quantum_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count,
+ loff_t *off)
{
- struct ptlrpc_service *svc = data;
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
enum ptlrpc_nrs_queue_type queue = 0;
char kernbuf[LPROCFS_NRS_WR_QUANTUM_MAX_CMD];
char *val;
long quantum_reg;
long quantum_hp;
/** lprocfs_find_named_value() modifies its argument, so keep a copy */
- unsigned long count_copy;
+ size_t count_copy;
int rc = 0;
int rc2 = 0;
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
return rc == -ENODEV && rc2 == -ENODEV ? -ENODEV : count;
}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_crrn_quantum);
/**
* Initializes a CRR-N policy's lprocfs interface for service \a svc
* \retval 0 success
* \retval != 0 error
*/
-int nrs_crrn_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_crrn_lprocfs_init(struct ptlrpc_service *svc)
{
- int rc;
-
struct lprocfs_vars nrs_crrn_lprocfs_vars[] = {
{ .name = "nrs_crrn_quantum",
- .read_fptr = ptlrpc_lprocfs_rd_nrs_crrn_quantum,
- .write_fptr = ptlrpc_lprocfs_wr_nrs_crrn_quantum,
+ .fops = &ptlrpc_lprocfs_nrs_crrn_quantum_fops,
.data = svc },
{ NULL }
};
if (svc->srv_procroot == NULL)
return 0;
- rc = lprocfs_add_vars(svc->srv_procroot, nrs_crrn_lprocfs_vars, NULL);
-
- return rc;
+ return lprocfs_add_vars(svc->srv_procroot, nrs_crrn_lprocfs_vars, NULL);
}
/**
*
* \param[in] svc the service
*/
-void nrs_crrn_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_crrn_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
lprocfs_remove_proc_entry("nrs_crrn_quantum", svc->srv_procroot);
}
-#endif /* LPROCFS */
+#endif /* CONFIG_PROC_FS */
/**
* CRR-N policy operations
.op_req_enqueue = nrs_crrn_req_add,
.op_req_dequeue = nrs_crrn_req_del,
.op_req_stop = nrs_crrn_req_stop,
-#ifdef LPROCFS
+#ifdef CONFIG_PROC_FS
.op_lprocfs_init = nrs_crrn_lprocfs_init,
.op_lprocfs_fini = nrs_crrn_lprocfs_fini,
#endif