4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2017, Intel Corporation.
25 * Copyright 2012 Xyratex Technology Limited
28 * lustre/ptlrpc/nrs_crr.c
30 * Network Request Scheduler (NRS) CRR-N policy
32 * Request ordering in a batched Round-Robin manner over client NIDs
34 * Author: Liang Zhen <liang@whamcloud.com>
35 * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
42 #define DEBUG_SUBSYSTEM S_RPC
43 #include <obd_support.h>
44 #include <obd_class.h>
45 #include <lustre_net.h>
46 #include <lprocfs_status.h>
47 #include "ptlrpc_internal.h"
52 * Client Round-Robin scheduling over client NIDs
58 #define NRS_POL_NAME_CRRN "crrn"
61 * Binary heap predicate.
63 * Uses ptlrpc_nrs_request::nr_u::crr::cr_round and
64 * ptlrpc_nrs_request::nr_u::crr::cr_sequence to compare two binheap nodes and
65 * produce a binary predicate that shows their relative priority, so that the
66 * binary heap can perform the necessary sorting operations.
68 * \param[in] e1 the first binheap node to compare
69 * \param[in] e2 the second binheap node to compare
75 crrn_req_compare(struct binheap_node *e1, struct binheap_node *e2)
77 struct ptlrpc_nrs_request *nrq1;
78 struct ptlrpc_nrs_request *nrq2;
/* Recover the embedding NRS requests from the binheap nodes. */
80 nrq1 = container_of(e1, struct ptlrpc_nrs_request, nr_node);
81 nrq2 = container_of(e2, struct ptlrpc_nrs_request, nr_node);
/*
 * Lower round numbers are scheduled first; within the same round, the
 * batch with the lower sequence number wins.
 * NOTE(review): the return statements for the two round-comparison
 * branches are not visible in this view of the file — confirm against
 * the full source.
 */
83 if (nrq1->nr_u.crr.cr_round < nrq2->nr_u.crr.cr_round)
85 else if (nrq1->nr_u.crr.cr_round > nrq2->nr_u.crr.cr_round)
/* Rounds are equal: order by batch sequence number. */
88 return nrq1->nr_u.crr.cr_sequence < nrq2->nr_u.crr.cr_sequence;
/* Binary heap operations: only the comparison predicate is provided. */
91 static struct binheap_ops nrs_crrn_heap_ops = {
94 .hop_compare = crrn_req_compare,
98 * rhashtable operations for nrs_crrn_net::cn_cli_hash
100 * This uses ptlrpc_request::rq_peer.nid (as nid4) as its key, in order to hash
101 * nrs_crrn_client objects.
/* rhashtable hash function: hash the client NID, mixing in the table seed. */
103 static u32 nrs_crrn_hashfn(const void *data, u32 len, u32 seed)
105 const struct lnet_nid *nid = data;
/* Fold the NID hash and seed down to 32 bits. */
107 return cfs_hash_32(nidhash(nid) ^ seed, 32);
/*
 * rhashtable key-compare callback: match a lookup key (an lnet_nid) against
 * a stored nrs_crrn_client.  Returns 0 on match, -ESRCH otherwise, as the
 * rhashtable API expects.
 */
110 static int nrs_crrn_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
112 const struct nrs_crrn_client *cli = obj;
113 const struct lnet_nid *nid = arg->key;
115 return nid_same(nid, &cli->cc_nid) ? 0 : -ESRCH;
/* Hash-table parameters: key is the client NID embedded in nrs_crrn_client. */
118 static const struct rhashtable_params nrs_crrn_hash_params = {
119 .key_len = sizeof(struct lnet_nid),
120 .key_offset = offsetof(struct nrs_crrn_client, cc_nid),
121 .head_offset = offsetof(struct nrs_crrn_client, cc_rhead),
122 .hashfn = nrs_crrn_hashfn,
123 .obj_cmpfn = nrs_crrn_cmpfn,
/*
 * Per-object destructor passed to rhashtable_free_and_destroy() from
 * nrs_crrn_stop(); asserts that no references to the client object remain.
 * NOTE(review): the line actually freeing the object is not visible in this
 * view — confirm against the full source.
 */
126 static void nrs_crrn_exit(void *vcli, void *data)
128 struct nrs_crrn_client *cli = vcli;
/* A non-zero refcount here would mean a request still holds this resource. */
130 LASSERTF(atomic_read(&cli->cc_ref) == 0,
131 "Busy CRR-N object from client with NID %s, with %d refs\n",
132 libcfs_nidstr(&cli->cc_nid), atomic_read(&cli->cc_ref));
138 * Called when a CRR-N policy instance is started.
140 * \param[in] policy the policy
142 * \retval -ENOMEM OOM error
145 static int nrs_crrn_start(struct ptlrpc_nrs_policy *policy, char *arg)
147 struct nrs_crrn_net *net;
/* Allocate the per-policy state on the policy's CPT for NUMA locality. */
151 OBD_CPT_ALLOC_PTR(net, nrs_pol2cptab(policy), nrs_pol2cptid(policy));
/* Create the request binheap; it grows atomically from 4096 elements. */
155 net->cn_binheap = binheap_create(&nrs_crrn_heap_ops,
156 CBH_FLAG_ATOMIC_GROW, 4096, NULL,
157 nrs_pol2cptab(policy),
158 nrs_pol2cptid(policy));
159 if (net->cn_binheap == NULL)
160 GOTO(out_net, rc = -ENOMEM);
/* Initialize the per-client hash table keyed by NID. */
162 rc = rhashtable_init(&net->cn_cli_hash, &nrs_crrn_hash_params);
164 GOTO(out_binheap, rc);
167 * Set default quantum value to max_rpcs_in_flight for non-MDS OSCs;
168 * there may be more RPCs pending from each struct nrs_crrn_client even
169 * with the default max_rpcs_in_flight value, as we are scheduling over
170 * NIDs, and there may be more than one mount point per client.
172 net->cn_quantum = OBD_MAX_RIF_DEFAULT;
174 * Set to 1 so that the test inside nrs_crrn_req_add() can evaluate to
177 net->cn_sequence = 1;
/* Publish the fully initialized state; success path ends here. */
179 policy->pol_private = net;
/* Error unwinding (labels elided in this view): destroy heap, free net. */
184 binheap_destroy(net->cn_binheap);
192 * Called when a CRR-N policy instance is stopped.
194 * Called when the policy has been instructed to transition to the
195 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state and has no more pending
198 * \param[in] policy the policy
200 static void nrs_crrn_stop(struct ptlrpc_nrs_policy *policy)
202 struct nrs_crrn_net *net = policy->pol_private;
/* The policy must have been started, and hold no queued requests. */
205 LASSERT(net != NULL);
206 LASSERT(net->cn_binheap != NULL);
207 LASSERT(binheap_is_empty(net->cn_binheap));
/* Tear down per-client objects (nrs_crrn_exit checks refs), then the heap. */
209 rhashtable_free_and_destroy(&net->cn_cli_hash, nrs_crrn_exit, NULL);
210 binheap_destroy(net->cn_binheap);
216 * Performs a policy-specific ctl function on CRR-N policy instances; similar
219 * \param[in] policy the policy instance
220 * \param[in] opc the opcode
221 * \param[in,out] arg used for passing parameters and information
223 * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
224 * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
226 * \retval 0 operation carried out successfully
229 static int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy,
230 enum ptlrpc_nrs_ctl opc,
/* Caller must hold the NRS head lock for all ctl operations. */
233 assert_spin_locked(&policy->pol_nrs->nrs_lock);
240 * Read Round Robin quantum size of a policy instance.
242 case NRS_CTL_CRRN_RD_QUANTUM: {
243 struct nrs_crrn_net *net = policy->pol_private;
/* Report the current quantum through the caller-supplied __u16. */
245 *(__u16 *)arg = net->cn_quantum;
250 * Write Round Robin quantum size of a policy instance.
252 case NRS_CTL_CRRN_WR_QUANTUM: {
253 struct nrs_crrn_net *net = policy->pol_private;
/* A zero quantum is invalid; input is range-checked by the lprocfs writer. */
255 net->cn_quantum = *(__u16 *)arg;
256 LASSERT(net->cn_quantum != 0);
265 * Obtains resources from CRR-N policy instances. The top-level resource lives
266 * inside \e nrs_crrn_net and the second-level resource inside
267 * \e nrs_crrn_client object instances.
269 * \param[in] policy the policy for which resources are being taken for
271 * \param[in] nrq the request for which resources are being taken
272 * \param[in] parent parent resource, embedded in nrs_crrn_net for the
274 * \param[out] resp resources references are placed in this array
275 * \param[in] moving_req signifies limited caller context; used to perform
276 * memory allocations in an atomic context in this
279 * \retval 0 we are returning a top-level, parent resource, one that is
280 * embedded in an nrs_crrn_net object
281 * \retval 1 we are returning a bottom-level resource, one that is embedded
282 * in an nrs_crrn_client object
284 * \see nrs_resource_get_safe()
286 static int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
287 struct ptlrpc_nrs_request *nrq,
288 const struct ptlrpc_nrs_resource *parent,
289 struct ptlrpc_nrs_resource **resp, bool moving_req)
291 struct nrs_crrn_net *net;
292 struct nrs_crrn_client *cli;
293 struct nrs_crrn_client *tmp;
294 struct ptlrpc_request *req;
/* First level of the hierarchy: hand out the top-level (net) resource. */
296 if (parent == NULL) {
297 *resp = &((struct nrs_crrn_net *)policy->pol_private)->cn_res;
/* Second level: find (or create) the per-client object for this NID. */
301 net = container_of(parent, struct nrs_crrn_net, cn_res);
302 req = container_of(nrq, struct ptlrpc_request, rq_nrq);
303 cli = rhashtable_lookup_fast(&net->cn_cli_hash, &req->rq_peer.nid,
304 nrs_crrn_hash_params);
/* Not found: allocate a new client object.  GFP_ATOMIC when the caller
 * is in limited (request-moving) context, GFP_NOFS otherwise. */
308 OBD_CPT_ALLOC_GFP(cli, nrs_pol2cptab(policy), nrs_pol2cptid(policy),
309 sizeof(*cli), moving_req ? GFP_ATOMIC : GFP_NOFS)
313 cli->cc_nid = req->rq_peer.nid;
315 atomic_set(&cli->cc_ref, 0);
/* Racy insert: another thread may have inserted the same NID meanwhile;
 * rhashtable_lookup_get_insert_fast() returns the existing object then. */
317 tmp = rhashtable_lookup_get_insert_fast(&net->cn_cli_hash,
319 nrs_crrn_hash_params);
321 /* insertion failed */
/* Take a reference on behalf of the request and return the resource. */
328 atomic_inc(&cli->cc_ref);
329 *resp = &cli->cc_res;
335 * Called when releasing references to the resource hierarchy obtained for a
336 * request for scheduling using the CRR-N policy.
338 * \param[in] policy the policy the resource belongs to
339 * \param[in] res the resource to be released
341 static void nrs_crrn_res_put(struct ptlrpc_nrs_policy *policy,
342 const struct ptlrpc_nrs_resource *res)
344 struct nrs_crrn_client *cli;
347 * Do nothing for freeing parent, nrs_crrn_net resources
349 if (res->res_parent == NULL)
/* Drop the per-request reference taken in nrs_crrn_res_get(). */
352 cli = container_of(res, struct nrs_crrn_client, cc_res);
354 atomic_dec(&cli->cc_ref);
358 * Called when getting a request from the CRR-N policy for handling, so that it can be served
360 * \param[in] policy the policy being polled
361 * \param[in] peek when set, signifies that we just want to examine the
362 * request, and not handle it, so the request is not removed
364 * \param[in] force force the policy to return a request; unused in this policy
366 * \retval the request to be handled
367 * \retval NULL no request available
369 * \see ptlrpc_nrs_req_get_nolock()
370 * \see nrs_request_get()
373 struct ptlrpc_nrs_request *nrs_crrn_req_get(struct ptlrpc_nrs_policy *policy,
374 bool peek, bool force)
376 struct nrs_crrn_net *net = policy->pol_private;
377 struct binheap_node *node = binheap_root(net->cn_binheap);
378 struct ptlrpc_nrs_request *nrq;
/* The heap root (if any) is the highest-priority queued request. */
380 nrq = unlikely(node == NULL) ? NULL :
381 container_of(node, struct ptlrpc_nrs_request, nr_node);
/* When actually handling (not just peeking), dequeue the request. */
383 if (likely(!peek && nrq != NULL)) {
384 struct nrs_crrn_client *cli;
385 struct ptlrpc_request *req = container_of(nrq,
386 struct ptlrpc_request,
389 cli = container_of(nrs_request_resource(nrq),
390 struct nrs_crrn_client, cc_res);
/* A queued request can never belong to a round later than its client's. */
392 LASSERT(nrq->nr_u.crr.cr_round <= cli->cc_round);
394 binheap_remove(net->cn_binheap, &nrq->nr_node);
398 "NRS: starting to handle %s request from %s, with round "
399 "%llu\n", NRS_POL_NAME_CRRN,
400 libcfs_idstr(&req->rq_peer), nrq->nr_u.crr.cr_round);
402 /** Peek at the next request to be served */
403 node = binheap_root(net->cn_binheap);
405 /** No more requests */
406 if (unlikely(node == NULL)) {
409 struct ptlrpc_nrs_request *next;
411 next = container_of(node, struct ptlrpc_nrs_request,
/* Advance the global round to the round being dispatched, so newly
 * arriving clients join the current round (see nrs_crrn_req_add()). */
414 if (net->cn_round < next->nr_u.crr.cr_round)
415 net->cn_round = next->nr_u.crr.cr_round;
423 * Adds request \a nrq to a CRR-N \a policy instance's set of queued requests
425 * A scheduling round is a stream of requests that have been sorted in batches
426 * according to the client that they originate from (as identified by its NID);
427 * there can be only one batch for each client in each round. The batches are of
428 * maximum size nrs_crrn_net::cn_quantum. When a new request arrives for
429 * scheduling from a client that has exhausted its quantum in its current round,
430 * it will start scheduling requests on the next scheduling round. Clients are
431 * allowed to schedule requests against a round until all requests for the round
432 * are serviced, so a client might miss a round if it is not generating requests
433 * for a long enough period of time. Clients that miss a round will continue
434 * with scheduling the next request that they generate, starting at the round
435 * that requests are being dispatched for, at the time of arrival of this new
438 * Requests are tagged with the round number and a sequence number; the sequence
439 * number indicates the relative ordering amongst the batches of requests in a
440 * round, and is identical for all requests in a batch, as is the round number.
441 * The round and sequence numbers are used by crrn_req_compare() in order to
442 * maintain an ordered set of rounds, with each round consisting of an ordered
443 * set of batches of requests.
445 * \param[in] policy the policy
446 * \param[in] nrq the request to add
448 * \retval 0 request successfully added
451 static int nrs_crrn_req_add(struct ptlrpc_nrs_policy *policy,
452 struct ptlrpc_nrs_request *nrq)
454 struct nrs_crrn_net *net;
455 struct nrs_crrn_client *cli;
/* Resolve the client (second-level) and net (top-level) resources. */
458 cli = container_of(nrs_request_resource(nrq),
459 struct nrs_crrn_client, cc_res);
460 net = container_of(nrs_request_resource(nrq)->res_parent,
461 struct nrs_crrn_net, cn_res);
/* Start a new batch if the quantum is exhausted, a newer round has
 * commenced, or the client went idle with quantum left over. */
463 if (cli->cc_quantum == 0 || cli->cc_round < net->cn_round ||
464 (cli->cc_active == 0 && cli->cc_quantum > 0)) {
467 * If the client has no pending requests, and still some of its
468 * quantum remaining unused, which implies it has not had a
469 * chance to schedule up to its maximum allowed batch size of
470 * requests in the previous round it participated, schedule this
471 * next request on a new round; this avoids fragmentation of
472 * request batches caused by client inactivity, at the expense
473 * of potentially slightly increased service time for the
474 * request batch this request will be a part of.
476 if (cli->cc_active == 0 && cli->cc_quantum > 0)
479 /** A new scheduling round has commenced */
480 if (cli->cc_round < net->cn_round)
481 cli->cc_round = net->cn_round;
483 /** I was not the last client through here */
484 if (cli->cc_sequence < net->cn_sequence)
485 cli->cc_sequence = ++net->cn_sequence;
487 * Reset the quantum if we have reached the maximum quantum
488 * size for this batch, or even if we have not managed to
489 * complete a batch size up to its maximum allowed size.
490 * XXX: Accessed unlocked
492 cli->cc_quantum = net->cn_quantum;
/* Tag the request with its batch's round and sequence for the binheap. */
495 nrq->nr_u.crr.cr_round = cli->cc_round;
496 nrq->nr_u.crr.cr_sequence = cli->cc_sequence;
498 rc = binheap_insert(net->cn_binheap, &nrq->nr_node);
/* Consume one unit of quantum; on exhaustion the client's next request
 * starts a new round (handling elided in this view). */
501 if (--cli->cc_quantum == 0)
508 * Removes request \a nrq from a CRR-N \a policy instance's set of queued
511 * \param[in] policy the policy
512 * \param[in] nrq the request to remove
514 static void nrs_crrn_req_del(struct ptlrpc_nrs_policy *policy,
515 struct ptlrpc_nrs_request *nrq)
517 struct nrs_crrn_net *net;
518 struct nrs_crrn_client *cli;
521 cli = container_of(nrs_request_resource(nrq),
522 struct nrs_crrn_client, cc_res);
523 net = container_of(nrs_request_resource(nrq)->res_parent,
524 struct nrs_crrn_net, cn_res);
526 LASSERT(nrq->nr_u.crr.cr_round <= cli->cc_round);
/* Remember whether this was the heap root before removing it. */
528 is_root = &nrq->nr_node == binheap_root(net->cn_binheap);
530 binheap_remove(net->cn_binheap, &nrq->nr_node);
534 * If we just deleted the node at the root of the binheap, we may have
535 * to adjust round numbers.
537 if (unlikely(is_root)) {
538 /** Peek at the next request to be served */
539 struct binheap_node *node = binheap_root(net->cn_binheap);
541 /** No more requests */
542 if (unlikely(node == NULL)) {
545 nrq = container_of(node, struct ptlrpc_nrs_request,
/* Keep the global round in step with the new root, mirroring
 * the logic in nrs_crrn_req_get(). */
548 if (net->cn_round < nrq->nr_u.crr.cr_round)
549 net->cn_round = nrq->nr_u.crr.cr_round;
555 * Called right after the request \a nrq finishes being handled by CRR-N policy
556 * instance \a policy.
558 * \param[in] policy the policy that handled the request
559 * \param[in] nrq the request that was handled
561 static void nrs_crrn_req_stop(struct ptlrpc_nrs_policy *policy,
562 struct ptlrpc_nrs_request *nrq)
564 struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
/* Debug-only trace of request completion; no scheduling state changes. */
568 "NRS: finished handling %s request from %s, with round %llu"
569 "\n", NRS_POL_NAME_CRRN,
570 libcfs_idstr(&req->rq_peer), nrq->nr_u.crr.cr_round);
578 * Retrieves the value of the Round Robin quantum (i.e. the maximum batch size)
579 * for CRR-N policy instances on both the regular and high-priority NRS head
580 * of a service, as long as a policy instance is not in the
581 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state; policy instances in this
582 * state are skipped later by nrs_crrn_ctl().
584 * Quantum values are in # of RPCs, and output is in YAML format.
592 ptlrpc_lprocfs_nrs_crrn_quantum_seq_show(struct seq_file *m, void *data)
594 struct ptlrpc_service *svc = m->private;
599 * Perform two separate calls to this as only one of the NRS heads'
600 * policies may be in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
601 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING state.
/* Query the regular NRS head's quantum. */
603 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
605 NRS_CTL_CRRN_RD_QUANTUM,
608 seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_REG
611 * Ignore -ENODEV as the regular NRS head's policy may be in the
612 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
614 } else if (rc != -ENODEV) {
/* Services without a high-priority head are done at this point. */
618 if (!nrs_svc_has_hp(svc))
/* Query the high-priority NRS head's quantum. */
621 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
623 NRS_CTL_CRRN_RD_QUANTUM,
626 seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_HP"%-5d\n", quantum);
628 * Ignore -ENODEV as the high priority NRS head's policy may be
629 * in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
631 } else if (rc != -ENODEV) {
640 * Sets the value of the Round Robin quantum (i.e. the maximum batch size)
641 * for CRR-N policy instances of a service. The user can set the quantum size
642 * for the regular or high priority NRS head individually by specifying each
643 * value, or both together in a single invocation.
647 * lctl set_param *.*.*.nrs_crrn_quantum=reg_quantum:32, to set the regular
648 * request quantum size on all PTLRPC services to 32
650 * lctl set_param *.*.*.nrs_crrn_quantum=hp_quantum:16, to set the high
651 * priority request quantum size on all PTLRPC services to 16, and
653 * lctl set_param *.*.ost_io.nrs_crrn_quantum=16, to set both the regular and
654 * high priority request quantum sizes of the ost_io service to 16.
656 * policy instances in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state
657 * are skipped later by nrs_crrn_ctl().
660 ptlrpc_lprocfs_nrs_crrn_quantum_seq_write(struct file *file,
661 const char __user *buffer,
665 struct seq_file *m = file->private_data;
666 struct ptlrpc_service *svc = m->private;
667 enum ptlrpc_nrs_queue_type queue = 0;
668 char kernbuf[LPROCFS_NRS_WR_QUANTUM_MAX_CMD];
672 /** lprocfs_find_named_value() modifies its argument, so keep a copy */
/* Reject overly long commands, then copy the user buffer and terminate. */
677 if (count > (sizeof(kernbuf) - 1))
680 if (copy_from_user(kernbuf, buffer, count))
683 kernbuf[count] = '\0';
688 * Check if the regular quantum value has been specified
690 val = lprocfs_find_named_value(kernbuf, NRS_LPROCFS_QUANTUM_NAME_REG,
/* val != kernbuf means the named token was found; parse the value. */
692 if (val != kernbuf) {
693 rc = kstrtol(val, 10, &quantum_reg);
697 queue |= PTLRPC_NRS_QUEUE_REG;
703 * Check if the high priority quantum value has been specified
705 val = lprocfs_find_named_value(kernbuf, NRS_LPROCFS_QUANTUM_NAME_HP,
707 if (val != kernbuf) {
/* An HP value only makes sense on services that have an HP head. */
708 if (!nrs_svc_has_hp(svc))
711 rc = kstrtol(val, 10, &quantum_hp);
715 queue |= PTLRPC_NRS_QUEUE_HP;
719 * If none of the queues has been specified, look for a valid numerical
/* Bare number: apply to the regular head, and to the HP head if present. */
723 rc = kstrtol(kernbuf, 10, &quantum_reg);
727 queue = PTLRPC_NRS_QUEUE_REG;
729 if (nrs_svc_has_hp(svc)) {
730 queue |= PTLRPC_NRS_QUEUE_HP;
731 quantum_hp = quantum_reg;
/* Range-check each requested value: must be in (0, LPROCFS_NRS_QUANTUM_MAX]. */
735 if ((((queue & PTLRPC_NRS_QUEUE_REG) != 0) &&
736 ((quantum_reg > LPROCFS_NRS_QUANTUM_MAX || quantum_reg <= 0))) ||
737 (((queue & PTLRPC_NRS_QUEUE_HP) != 0) &&
738 ((quantum_hp > LPROCFS_NRS_QUANTUM_MAX || quantum_hp <= 0))))
742 * We change the values on regular and HP NRS heads separately, so that
743 * we do not exit early from ptlrpc_nrs_policy_control() with an error
744 * returned by nrs_policy_ctl_locked(), in cases where the user has not
745 * started the policy on either the regular or HP NRS head; i.e. we are
746 * ignoring -ENODEV within nrs_policy_ctl_locked(). -ENODEV is returned
747 * only if the operation fails with -ENODEV on all heads that have been
748 * specified by the command; if at least one operation succeeds,
749 * success is returned.
751 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
752 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
754 NRS_CTL_CRRN_WR_QUANTUM, false,
/* -ENODEV is fatal only when the regular head was the sole target. */
756 if ((rc < 0 && rc != -ENODEV) ||
757 (rc == -ENODEV && queue == PTLRPC_NRS_QUEUE_REG))
761 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
762 rc2 = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
764 NRS_CTL_CRRN_WR_QUANTUM, false,
766 if ((rc2 < 0 && rc2 != -ENODEV) ||
767 (rc2 == -ENODEV && queue == PTLRPC_NRS_QUEUE_HP))
/* Success (count) unless both heads reported -ENODEV. */
771 return rc == -ENODEV && rc2 == -ENODEV ? -ENODEV : count;
774 LDEBUGFS_SEQ_FOPS(ptlrpc_lprocfs_nrs_crrn_quantum);
777 * Initializes a CRR-N policy's lprocfs interface for service \a svc
779 * \param[in] svc the service
784 static int nrs_crrn_lprocfs_init(struct ptlrpc_service *svc)
786 struct ldebugfs_vars nrs_crrn_lprocfs_vars[] = {
787 { .name = "nrs_crrn_quantum",
788 .fops = &ptlrpc_lprocfs_nrs_crrn_quantum_fops,
/* Nothing to register if the service has no debugfs directory. */
793 if (!svc->srv_debugfs_entry)
796 ldebugfs_add_vars(svc->srv_debugfs_entry, nrs_crrn_lprocfs_vars, NULL);
802 * CRR-N policy operations
/* Operations vector wiring the CRR-N callbacks into the NRS framework. */
804 static const struct ptlrpc_nrs_pol_ops nrs_crrn_ops = {
805 .op_policy_start = nrs_crrn_start,
806 .op_policy_stop = nrs_crrn_stop,
807 .op_policy_ctl = nrs_crrn_ctl,
808 .op_res_get = nrs_crrn_res_get,
809 .op_res_put = nrs_crrn_res_put,
810 .op_req_get = nrs_crrn_req_get,
811 .op_req_enqueue = nrs_crrn_req_add,
812 .op_req_dequeue = nrs_crrn_req_del,
813 .op_req_stop = nrs_crrn_req_stop,
814 .op_lprocfs_init = nrs_crrn_lprocfs_init,
818 * CRR-N policy configuration
/* Policy registration descriptor; compatible with all NRS heads. */
820 struct ptlrpc_nrs_pol_conf nrs_conf_crrn = {
821 .nc_name = NRS_POL_NAME_CRRN,
822 .nc_ops = &nrs_crrn_ops,
823 .nc_compat = nrs_policy_compat_all,
826 /** @} CRR-N policy */