4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2017, Intel Corporation.
25 * Copyright 2012 Xyratex Technology Limited
28 * lustre/ptlrpc/nrs_orr.c
30 * Network Request Scheduler (NRS) ORR and TRR policies
32 * Request scheduling in a Round-Robin manner over backend-fs objects and OSTs
35 * Author: Liang Zhen <liang@whamcloud.com>
36 * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
38 #ifdef HAVE_SERVER_SUPPORT
44 #define DEBUG_SUBSYSTEM S_RPC
45 #include <obd_support.h>
46 #include <obd_class.h>
47 #include <lustre_net.h>
48 #include <lustre_req_layout.h>
49 #include "ptlrpc_internal.h"
52 * \name ORR/TRR policy
54 * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
56 * ORR performs batched Round Robin scheduling of brw RPCs, based on the FID of
57 * the backend-fs object that the brw RPC pertains to; the TRR policy performs
58 * batched Round Robin scheduling of brw RPCs, based on the OST index that the
59 * RPC pertains to. Both policies also order RPCs in each batch in ascending
60 * offset order, which is lprocfs-tunable between logical file offsets, and
61 * physical disk offsets, as reported by fiemap.
63 * The TRR policy reuses much of the functionality of ORR. These two scheduling
64 * algorithms could alternatively be implemented under a single NRS policy, that
65 * uses an lprocfs tunable in order to switch between the two types of
66 * scheduling behaviour. The two algorithms have been implemented as separate
67 * policies for reasons of clarity to the user, and to avoid issues that would
68 * otherwise arise at the point of switching between behaviours in the case of
69 * having a single policy, such as resource cleanup for nrs_orr_object
70 * instances. It is possible that this may need to be re-examined in the future,
71 * along with potentially coalescing other policies that perform batched request
72 * scheduling in a Round-Robin manner, all into one policy.
/* Policy names as registered with NRS; code below (e.g. key filling and
 * request-get/stop paths) strncmp()s against these to tell an ORR policy
 * instance apart from a TRR one. */
77 #define NRS_POL_NAME_ORR "orr"
78 #define NRS_POL_NAME_TRR "trr"
81 * Checks if the RPC type of \a nrq is currently handled by an ORR/TRR policy
83 * \param[in] orrd the ORR/TRR policy scheduler instance
84 * \param[in] nrq the request
85 * \param[out] opcode the opcode is saved here, just in order to avoid calling
86 * lustre_msg_get_opc() again later
88 * \retval true request type is supported by the policy instance
89 * \retval false request type is not supported by the policy instance
/*
 * Reports whether this ORR/TRR instance currently schedules the RPC type of
 * \a nrq (OST_READ / OST_WRITE, per the od_supp mask), saving the opcode in
 * \a opcode so the caller need not call lustre_msg_get_opc() again.
 * NOTE(review): interior lines (braces, switch/return) are elided in this
 * excerpt; the visible code only shows the opcode fetch and mask tests.
 */
91 static bool nrs_orr_req_supported(struct nrs_orr_data *orrd,
92 struct ptlrpc_nrs_request *nrq, __u32 *opcode)
94 struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
96 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
/* od_supp is read without holding a lock, as the XXX below records. */
100 * XXX: nrs_orr_data::od_supp accessed unlocked.
104 rc = orrd->od_supp & NOS_OST_READ;
107 rc = orrd->od_supp & NOS_OST_WRITE;
118 * Returns the ORR/TRR key fields for the request \a nrq in \a key.
120 * \param[in] orrd the ORR/TRR policy scheduler instance
121 * \param[in] nrq the request
122 * \param[in] opc the request's opcode
123 * \param[in] name the policy name
124 * \param[out] key fields of the key are returned here.
126 * \retval 0 key filled successfully
/*
 * Fills \a key for \a nrq: the backend-fs object FID for ORR instances
 * (derived from the OST body's object id via ostid_to_fid()), or the OST
 * index for TRR instances. \a name selects which policy flavour applies.
 */
129 static int nrs_orr_key_fill(struct nrs_orr_data *orrd,
130 struct ptlrpc_nrs_request *nrq, __u32 opc,
131 char *name, struct nrs_orr_key *key)
133 struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
135 struct ost_body *body;
/* Policy flavour is identified by name; "orr" means FID-keyed scheduling. */
137 bool is_orr = strncmp(name, NRS_POL_NAME_ORR,
138 NRS_POL_NAME_MAX) == 0;
140 LASSERT(req != NULL);
143 * This is an attempt to fill in the request key fields while
144 * moving a request from the regular to the high-priority NRS
145 * head (via ldlm_lock_reorder_req()), but the request key has
146 * been adequately filled when nrs_orr_res_get() was called through
147 * ptlrpc_nrs_req_initialize() for the regular NRS head's ORR/TRR
148 * policy, so there is nothing to do.
150 if ((is_orr && nrq->nr_u.orr.or_orr_set) ||
151 (!is_orr && nrq->nr_u.orr.or_trr_set)) {
152 *key = nrq->nr_u.orr.or_key;
156 /* Bounce unconnected requests to the default policy. */
157 if (req->rq_export == NULL)
/* Key was filled for the other flavour; wipe it before refilling. */
160 if (nrq->nr_u.orr.or_orr_set || nrq->nr_u.orr.or_trr_set)
161 memset(&nrq->nr_u.orr.or_key, 0, sizeof(nrq->nr_u.orr.or_key));
163 ost_idx = class_server_data(req->rq_export->exp_obd)->lsd_osd_index;
168 * The request pill for OST_READ and OST_WRITE requests is
169 * initialized in the ost_io service's
170 * ptlrpc_service_ops::so_hpreq_handler, ost_io_hpreq_handler(),
171 * so no need to redo it here.
173 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
/* ORR: key is the backend object FID resolved from the ostid. */
177 rc = ostid_to_fid(&key->ok_fid, &body->oa.o_oi, ost_idx);
181 nrq->nr_u.orr.or_orr_set = 1;
/* TRR: key is simply the OST index of this server. */
183 key->ok_idx = ost_idx;
184 nrq->nr_u.orr.or_trr_set = 1;
191 * Populates the range values in \a range with logical offsets obtained via
194 * \param[in] nb niobuf_remote struct array for this request
195 * \param[in] niocount count of niobuf_remote structs for this request
196 * \param[out] range the offset range is returned here
/*
 * Computes the logical byte range [or_start, or_end] covered by the
 * request's niobufs: from the page-aligned offset of the first niobuf to
 * the end of the last one, rounded out to a page boundary.
 */
198 static void nrs_orr_range_fill_logical(struct niobuf_remote *nb, int niocount,
199 struct nrs_orr_req_range *range)
201 /* Should we do this at page boundaries ? */
202 range->or_start = nb[0].rnb_offset & PAGE_MASK;
203 range->or_end = (nb[niocount - 1].rnb_offset +
204 nb[niocount - 1].rnb_len - 1) | ~PAGE_MASK;
208 * We obtain information just for a single extent, as the request can only be in
209 * a single place in the binary heap anyway.
211 #define ORR_NUM_EXTENTS 1
214 * Converts the logical file offset range in \a range, to a physical disk offset
215 * range in \a range, for a request. Uses obd_get_info() in order to carry out a
216 * fiemap call and obtain backend-fs extent information. The returned range is
217 * in physical block numbers.
219 * \param[in] nrq the request
220 * \param[in] oa obdo struct for this request
221 * \param[in,out] range the offset range in bytes; logical range in, physical
224 * \retval 0 physical offsets obtained successfully
/*
 * Translates the logical range in \a range into physical disk offsets by
 * issuing a single-extent fiemap through obd_get_info(). May sleep, so it
 * must not be called from atomic context (see callers' moving_req checks).
 */
227 static int nrs_orr_range_fill_physical(struct ptlrpc_nrs_request *nrq,
229 struct nrs_orr_req_range *range)
231 struct ptlrpc_request *req = container_of(nrq,
232 struct ptlrpc_request,
/* On-stack fiemap buffer sized for exactly ORR_NUM_EXTENTS extents. */
234 char fiemap_buf[offsetof(struct fiemap,
235 fm_extents[ORR_NUM_EXTENTS])];
236 struct fiemap *fiemap = (struct fiemap *)fiemap_buf;
237 struct ll_fiemap_info_key key;
242 key = (typeof(key)) {
243 .lfik_name = KEY_FIEMAP,
246 .fm_start = range->or_start,
247 .fm_length = range->or_end - range->or_start,
248 .fm_extent_count = ORR_NUM_EXTENTS
252 rc = obd_get_info(req->rq_svc_thread->t_env, req->rq_export,
253 sizeof(key), &key, NULL, fiemap);
/* Anything other than exactly the one expected extent is treated as
 * failure; callers fall back to logical offsets. */
257 if (fiemap->fm_mapped_extents == 0 ||
258 fiemap->fm_mapped_extents > ORR_NUM_EXTENTS)
259 GOTO(out, rc = -EFAULT);
262 * Calculate the physical offset ranges for the request from the extent
263 * information and the logical request offsets.
265 start = fiemap->fm_extents[0].fe_physical + range->or_start -
266 fiemap->fm_extents[0].fe_logical;
267 end = start + range->or_end - range->or_start;
269 range->or_start = start;
/* Mark the range as physical so repeated fills can be skipped. */
272 nrq->nr_u.orr.or_physical_set = 1;
278 * Sets the offset range the request covers; either in logical file
279 * offsets or in physical disk offsets.
281 * \param[in] nrq the request
282 * \param[in] orrd the ORR/TRR policy scheduler instance
283 * \param[in] opc the request's opcode
284 * \param[in] moving_req is the request in the process of moving onto the
285 * high-priority NRS head?
287 * \retval 0 range filled successfully
/*
 * Fills nrq->nr_u.orr.or_range with the byte range the brw request covers:
 * always the logical range from the niobufs, upgraded to physical disk
 * offsets via fiemap when od_physical is set, the RPC is an OST_READ, and
 * we are not being called from atomic context (\a moving_req).
 */
290 static int nrs_orr_range_fill(struct ptlrpc_nrs_request *nrq,
291 struct nrs_orr_data *orrd, __u32 opc,
294 struct ptlrpc_request *req = container_of(nrq,
295 struct ptlrpc_request,
297 struct obd_ioobj *ioo;
298 struct niobuf_remote *nb;
299 struct ost_body *body;
300 struct nrs_orr_req_range range;
305 * If we are scheduling using physical disk offsets, but we have filled
306 * the offset information in the request previously
307 * (i.e. ldlm_lock_reorder_req() is moving the request to the
308 * high-priority NRS head), there is no need to do anything, and we can
309 * exit. Moreover than the lack of need, we would be unable to perform
310 * the obd_get_info() call required in nrs_orr_range_fill_physical(),
311 * because ldlm_lock_reorder_lock() calls into here while holding a
312 * spinlock, and retrieving fiemap information via obd_get_info() is a
313 * potentially sleeping operation.
315 if (orrd->od_physical && nrq->nr_u.orr.or_physical_set)
318 ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
320 GOTO(out, rc = -EFAULT);
322 niocount = ioo->ioo_bufcnt;
324 nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
326 GOTO(out, rc = -EFAULT);
329 * Use logical information from niobuf_remote structures.
331 nrs_orr_range_fill_logical(nb, niocount, &range);
334 * Obtain physical offsets if selected, and this is an OST_READ RPC
335 * RPC. We do not enter this block if moving_req is set which indicates
336 * that the request is being moved to the high-priority NRS head by
337 * ldlm_lock_reorder_req(), as that function calls in here while holding
338 * a spinlock, and nrs_orr_range_physical() can sleep, so we just use
339 * logical file offsets for the range values for such requests.
341 if (orrd->od_physical && opc == OST_READ && !moving_req) {
342 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
344 GOTO(out, rc = -EFAULT);
347 * Translate to physical block offsets from backend filesystem
349 * Ignore return values; if obtaining the physical offsets
350 * fails, use the logical offsets.
352 nrs_orr_range_fill_physical(nrq, &body->oa, &range);
/* Publish whichever range (logical or physical) we ended up with. */
355 nrq->nr_u.orr.or_range = range;
361 * Generates a character string that can be used in order to register uniquely
362 * named libcfs_hash and slab objects for ORR/TRR policy instances. The
363 * character string is unique per policy instance, as it includes the policy's
364 * name, the CPT number, and a {reg|hp} token, and there is one policy instance
365 * per NRS head on each CPT, and the policy is only compatible with the ost_io
368 * \param[in] policy the policy instance
369 * \param[out] name the character array that will hold the generated name
/*
 * Builds a per-instance unique name of the form
 * "nrs_<policy>_{reg|hp}_<cptid>" for registering the instance's hash
 * table and slab cache.
 */
371 static void nrs_orr_genobjname(struct ptlrpc_nrs_policy *policy, char *name)
373 snprintf(name, NRS_ORR_OBJ_NAME_MAX, "%s%s%s%d",
374 "nrs_", policy->pol_desc->pd_name,
375 policy->pol_nrs->nrs_queue_type == PTLRPC_NRS_QUEUE_REG ?
376 "_reg_" : "_hp_", nrs_pol2cptid(policy));
380 * ORR/TRR hash operations
/* Hash-table sizing: ORR hashes per-object FIDs so it gets a large table;
 * TRR only hashes OST indices, so a tiny table suffices. Both use
 * spinlock-per-bucket locking; ORR additionally asserts emptiness on
 * teardown (TRR relies on its hs_exit callback instead). */
382 #define NRS_ORR_BITS 24
383 #define NRS_ORR_BKT_BITS 12
384 #define NRS_ORR_HASH_FLAGS (CFS_HASH_SPIN_BKTLOCK | CFS_HASH_ASSERT_EMPTY)
386 #define NRS_TRR_BITS 4
387 #define NRS_TRR_BKT_BITS 2
388 #define NRS_TRR_HASH_FLAGS CFS_HASH_SPIN_BKTLOCK
/* Hash function shared by ORR and TRR: djb2 over the whole nrs_orr_key. */
391 nrs_orr_hop_hash(struct cfs_hash *hs, const void *key, unsigned mask)
393 return cfs_hash_djb2_hash(key, sizeof(struct nrs_orr_key), mask);
/* Returns the embedded key of the nrs_orr_object owning \a hnode. */
396 static void *nrs_orr_hop_key(struct hlist_node *hnode)
398 struct nrs_orr_object *orro = hlist_entry(hnode,
399 struct nrs_orr_object,
401 return &orro->oo_key;
/* ORR key comparison: two keys match when their object FIDs are equal. */
404 static int nrs_orr_hop_keycmp(const void *key, struct hlist_node *hnode)
406 struct nrs_orr_object *orro = hlist_entry(hnode,
407 struct nrs_orr_object,
410 return lu_fid_eq(&orro->oo_key.ok_fid,
411 &((struct nrs_orr_key *)key)->ok_fid);
/* Maps a hash list node back to its containing nrs_orr_object. */
414 static void *nrs_orr_hop_object(struct hlist_node *hnode)
416 return hlist_entry(hnode, struct nrs_orr_object, oo_hnode);
/* Takes a reference on the nrs_orr_object when the hash hands it out
 * (refcount increment is on a line elided from this excerpt). */
419 static void nrs_orr_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
421 struct nrs_orr_object *orro = hlist_entry(hnode,
422 struct nrs_orr_object,
428 * Removes an nrs_orr_object the hash and frees its memory, if the object has
431 static void nrs_orr_hop_put_free(struct cfs_hash *hs, struct hlist_node *hnode)
433 struct nrs_orr_object *orro = hlist_entry(hnode,
434 struct nrs_orr_object,
436 struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
437 struct nrs_orr_data, od_res);
438 struct cfs_hash_bd bd;
440 cfs_hash_bd_get_and_lock(hs, &orro->oo_key, &bd, 1);
442 if (--orro->oo_ref > 1) {
443 cfs_hash_bd_unlock(hs, &bd, 1);
447 LASSERT(orro->oo_ref == 1);
449 cfs_hash_bd_del_locked(hs, &bd, hnode);
450 cfs_hash_bd_unlock(hs, &bd, 1);
452 OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
/* Plain put: drops a reference without freeing; used as the locked-put for
 * ORR and as both put callbacks for TRR, whose objects are only freed from
 * nrs_trr_hop_exit() (decrement is on a line elided from this excerpt). */
455 static void nrs_orr_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
457 struct nrs_orr_object *orro = hlist_entry(hnode,
458 struct nrs_orr_object,
/* TRR key comparison: two keys match when their OST indices are equal. */
463 static int nrs_trr_hop_keycmp(const void *key, struct hlist_node *hnode)
465 struct nrs_orr_object *orro = hlist_entry(hnode,
466 struct nrs_orr_object,
469 return orro->oo_key.ok_idx == ((struct nrs_orr_key *)key)->ok_idx;
/* TRR hash teardown callback: frees each object when the table is
 * destroyed, asserting that nobody still holds a reference. */
472 static void nrs_trr_hop_exit(struct cfs_hash *hs, struct hlist_node *hnode)
474 struct nrs_orr_object *orro = hlist_entry(hnode,
475 struct nrs_orr_object,
477 struct nrs_orr_data *orrd = container_of(orro->oo_res.res_parent,
478 struct nrs_orr_data, od_res);
480 LASSERTF(orro->oo_ref == 0,
481 "Busy NRS TRR policy object for OST with index %u, with %ld "
482 "refs\n", orro->oo_key.ok_idx, orro->oo_ref);
484 OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
/* ORR hash operations: FID-keyed; objects are freed on last put (unlocked
 * put path), so no hs_exit callback is needed. */
487 static struct cfs_hash_ops nrs_orr_hash_ops = {
488 .hs_hash = nrs_orr_hop_hash,
489 .hs_key = nrs_orr_hop_key,
490 .hs_keycmp = nrs_orr_hop_keycmp,
491 .hs_object = nrs_orr_hop_object,
492 .hs_get = nrs_orr_hop_get,
493 .hs_put = nrs_orr_hop_put_free,
494 .hs_put_locked = nrs_orr_hop_put,
/* TRR hash operations: OST-index-keyed; objects persist in the table and
 * are only freed by hs_exit when the table itself is destroyed. */
497 static struct cfs_hash_ops nrs_trr_hash_ops = {
498 .hs_hash = nrs_orr_hop_hash,
499 .hs_key = nrs_orr_hop_key,
500 .hs_keycmp = nrs_trr_hop_keycmp,
501 .hs_object = nrs_orr_hop_object,
502 .hs_get = nrs_orr_hop_get,
503 .hs_put = nrs_orr_hop_put,
504 .hs_put_locked = nrs_orr_hop_put,
505 .hs_exit = nrs_trr_hop_exit,
/* Default Round Robin quantum: maximum batch size, in RPCs, that one
 * object/OST may schedule per round before yielding to the next. */
508 #define NRS_ORR_QUANTUM_DFLT 256
511 * Binary heap predicate.
514 * ptlrpc_nrs_request::nr_u::orr::or_round,
515 * ptlrpc_nrs_request::nr_u::orr::or_sequence, and
516 * ptlrpc_nrs_request::nr_u::orr::or_range to compare two binheap nodes and
517 * produce a binary predicate that indicates their relative priority, so that
518 * the binary heap can perform the necessary sorting operations.
520 * \param[in] e1 the first binheap node to compare
521 * \param[in] e2 the second binheap node to compare
/*
 * Binheap ordering predicate. Priority is decided lexicographically:
 * round number first, then batch sequence number, then ascending start
 * offset, and finally ascending end offset as a tiebreak.
 */
527 orr_req_compare(struct cfs_binheap_node *e1, struct cfs_binheap_node *e2)
529 struct ptlrpc_nrs_request *nrq1;
530 struct ptlrpc_nrs_request *nrq2;
532 nrq1 = container_of(e1, struct ptlrpc_nrs_request, nr_node);
533 nrq2 = container_of(e2, struct ptlrpc_nrs_request, nr_node);
536 * Requests have been scheduled against a different scheduling round.
538 if (nrq1->nr_u.orr.or_round < nrq2->nr_u.orr.or_round)
540 else if (nrq1->nr_u.orr.or_round > nrq2->nr_u.orr.or_round)
544 * Requests have been scheduled against the same scheduling round, but
545 * belong to a different batch, i.e. they pertain to a different
546 * backend-fs object (for ORR policy instances) or OST (for TRR policy
549 if (nrq1->nr_u.orr.or_sequence < nrq2->nr_u.orr.or_sequence)
551 else if (nrq1->nr_u.orr.or_sequence > nrq2->nr_u.orr.or_sequence)
555 * If round numbers and sequence numbers are equal, the two requests
556 * have been scheduled on the same round, and belong to the same batch,
557 * which means they pertain to the same backend-fs object (if this is an
558 * ORR policy instance), or to the same OST (if this is a TRR policy
559 * instance), so these requests should be sorted by ascending offset
562 if (nrq1->nr_u.orr.or_range.or_start <
563 nrq2->nr_u.orr.or_range.or_start) {
565 } else if (nrq1->nr_u.orr.or_range.or_start >
566 nrq2->nr_u.orr.or_range.or_start) {
570 * Requests start from the same offset; Dispatch the shorter one
571 * first; perhaps slightly more chances of hitting caches like
574 return nrq1->nr_u.orr.or_range.or_end <
575 nrq2->nr_u.orr.or_range.or_end;
580 * ORR binary heap operations
/* Binary heap callbacks; only a comparison predicate is required here. */
582 static struct cfs_binheap_ops nrs_orr_heap_ops = {
585 .hop_compare = orr_req_compare,
589 * Prints a warning message if an ORR/TRR policy is started on a service with
590 * more than one CPT. Not printed on the console for now, since we don't
591 * have any performance metrics in the first place, and it is annoying.
593 * \param[in] policy the policy instance
/* Policy init hook: only emits a D_CONFIG hint when the service has more
 * than one CPT, since ORR/TRR may schedule better with one partition. */
597 static int nrs_orr_init(struct ptlrpc_nrs_policy *policy)
599 if (policy->pol_nrs->nrs_svcpt->scp_service->srv_ncpts > 1)
600 CDEBUG(D_CONFIG, "%s: The %s NRS policy was registered on a "
601 "service with multiple service partitions. This policy "
602 "may perform better with a single partition.\n",
603 policy->pol_nrs->nrs_svcpt->scp_service->srv_name,
604 policy->pol_desc->pd_name);
610 * Called when an ORR policy instance is started.
612 * \param[in] policy the policy
614 * \retval -ENOMEM OOM error
/*
 * Policy start hook: allocates the per-instance nrs_orr_data on the
 * policy's CPT and sets up its three resources — the request binheap, the
 * object slab cache, and the object hash (ORR- or TRR-flavoured, chosen by
 * policy name) — then initializes the tunables. Unwinds via the
 * out_cache/out_binheap/out_orrd labels on failure (labels elided here).
 */
617 static int nrs_orr_start(struct ptlrpc_nrs_policy *policy, char *arg)
619 struct nrs_orr_data *orrd;
620 struct cfs_hash_ops *ops;
628 OBD_CPT_ALLOC_PTR(orrd, nrs_pol2cptab(policy), nrs_pol2cptid(policy));
633 * Binary heap instance for sorted incoming requests.
635 orrd->od_binheap = cfs_binheap_create(&nrs_orr_heap_ops,
636 CBH_FLAG_ATOMIC_GROW, 4096, NULL,
637 nrs_pol2cptab(policy),
638 nrs_pol2cptid(policy));
639 if (orrd->od_binheap == NULL)
640 GOTO(out_orrd, rc = -ENOMEM);
/* Unique name shared by the slab cache and the hash table below. */
642 nrs_orr_genobjname(policy, orrd->od_objname);
645 * Slab cache for NRS ORR/TRR objects.
647 orrd->od_cache = kmem_cache_create(orrd->od_objname,
648 sizeof(struct nrs_orr_object),
650 if (orrd->od_cache == NULL)
651 GOTO(out_binheap, rc = -ENOMEM);
/* Pick hash flavour and sizing based on whether this is ORR or TRR. */
653 if (strncmp(policy->pol_desc->pd_name, NRS_POL_NAME_ORR,
654 NRS_POL_NAME_MAX) == 0) {
655 ops = &nrs_orr_hash_ops;
656 cur_bits = NRS_ORR_BITS;
657 max_bits = NRS_ORR_BITS;
658 bkt_bits = NRS_ORR_BKT_BITS;
659 flags = NRS_ORR_HASH_FLAGS;
661 ops = &nrs_trr_hash_ops;
662 cur_bits = NRS_TRR_BITS;
663 max_bits = NRS_TRR_BITS;
664 bkt_bits = NRS_TRR_BKT_BITS;
665 flags = NRS_TRR_HASH_FLAGS;
669 * Hash for finding objects by struct nrs_orr_key.
670 * XXX: For TRR, it might be better to avoid using libcfs_hash?
671 * All that needs to be resolved are OST indices, and they
672 * will stay relatively stable during an OSS node's lifetime.
674 orrd->od_obj_hash = cfs_hash_create(orrd->od_objname, cur_bits,
675 max_bits, bkt_bits, 0,
677 CFS_HASH_MAX_THETA, ops, flags);
678 if (orrd->od_obj_hash == NULL)
679 GOTO(out_cache, rc = -ENOMEM);
681 /* XXX: Fields accessed unlocked */
682 orrd->od_quantum = NRS_ORR_QUANTUM_DFLT;
683 orrd->od_supp = NOS_DFLT;
684 orrd->od_physical = true;
686 * Set to 1 so that the test inside nrs_orr_req_add() can evaluate to
689 orrd->od_sequence = 1;
691 policy->pol_private = orrd;
/* Error unwind: release resources in reverse order of acquisition. */
696 kmem_cache_destroy(orrd->od_cache);
698 cfs_binheap_destroy(orrd->od_binheap);
706 * Called when an ORR/TRR policy instance is stopped.
708 * Called when the policy has been instructed to transition to the
709 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state and has no more
710 * pending requests to serve.
712 * \param[in] policy the policy
/*
 * Policy stop hook: runs only once the policy is STOPPED with no queued
 * requests (asserted via the empty-binheap check), then tears down the
 * binheap, hash, and slab cache created by nrs_orr_start().
 */
714 static void nrs_orr_stop(struct ptlrpc_nrs_policy *policy)
716 struct nrs_orr_data *orrd = policy->pol_private;
719 LASSERT(orrd != NULL);
720 LASSERT(orrd->od_binheap != NULL);
721 LASSERT(orrd->od_obj_hash != NULL);
722 LASSERT(orrd->od_cache != NULL);
723 LASSERT(cfs_binheap_is_empty(orrd->od_binheap));
725 cfs_binheap_destroy(orrd->od_binheap);
726 cfs_hash_putref(orrd->od_obj_hash);
727 kmem_cache_destroy(orrd->od_cache);
733 * Performs a policy-specific ctl function on ORR/TRR policy instances; similar
736 * \param[in] policy the policy instance
737 * \param[in] opc the opcode
738 * \param[in,out] arg used for passing parameters and information
740 * \pre assert_spin_locked(&policy->pol_nrs->->nrs_lock)
741 * \post assert_spin_locked(&policy->pol_nrs->->nrs_lock)
743 * \retval 0 operation carried successfully
/*
 * lprocfs control hook: reads/writes the quantum, offset type
 * (logical/physical), and supported-request mask of a policy instance.
 * Caller must hold nrs_lock, which serializes access to pol_private.
 */
746 static int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy,
747 enum ptlrpc_nrs_ctl opc, void *arg)
749 assert_spin_locked(&policy->pol_nrs->nrs_lock);
751 switch((enum nrs_ctl_orr)opc) {
755 case NRS_CTL_ORR_RD_QUANTUM: {
756 struct nrs_orr_data *orrd = policy->pol_private;
758 *(__u16 *)arg = orrd->od_quantum;
762 case NRS_CTL_ORR_WR_QUANTUM: {
763 struct nrs_orr_data *orrd = policy->pol_private;
765 orrd->od_quantum = *(__u16 *)arg;
766 LASSERT(orrd->od_quantum != 0);
770 case NRS_CTL_ORR_RD_OFF_TYPE: {
771 struct nrs_orr_data *orrd = policy->pol_private;
773 *(bool *)arg = orrd->od_physical;
777 case NRS_CTL_ORR_WR_OFF_TYPE: {
778 struct nrs_orr_data *orrd = policy->pol_private;
780 orrd->od_physical = *(bool *)arg;
784 case NRS_CTL_ORR_RD_SUPP_REQ: {
785 struct nrs_orr_data *orrd = policy->pol_private;
787 *(enum nrs_orr_supp *)arg = orrd->od_supp;
791 case NRS_CTL_ORR_WR_SUPP_REQ: {
792 struct nrs_orr_data *orrd = policy->pol_private;
794 orrd->od_supp = *(enum nrs_orr_supp *)arg;
/* At least one of read/write support must remain enabled. */
795 LASSERT((orrd->od_supp & NOS_OST_RW) != 0);
803 * Obtains resources for ORR/TRR policy instances. The top-level resource lives
804 * inside \e nrs_orr_data and the second-level resource inside
805 * \e nrs_orr_object instances.
807 * \param[in] policy the policy for which resources are being taken for
809 * \param[in] nrq the request for which resources are being taken
810 * \param[in] parent parent resource, embedded in nrs_orr_data for the
812 * \param[out] resp used to return resource references
813 * \param[in] moving_req signifies limited caller context; used to perform
814 * memory allocations in an atomic context in this
817 * \retval 0 we are returning a top-level, parent resource, one that is
818 * embedded in an nrs_orr_data object
819 * \retval 1 we are returning a bottom-level resource, one that is embedded
820 * in an nrs_orr_object object
822 * \see nrs_resource_get_safe()
/*
 * Resource-get hook: first call (parent == NULL) hands back the top-level
 * nrs_orr_data resource; second call resolves/creates the per-object (ORR)
 * or per-OST (TRR) nrs_orr_object via the hash, filling the request's key
 * and offset range on the way. Returns 0 for the parent resource, 1 for
 * the leaf (see nrs_resource_get_safe()).
 */
824 static int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
825 struct ptlrpc_nrs_request *nrq,
826 const struct ptlrpc_nrs_resource *parent,
827 struct ptlrpc_nrs_resource **resp, bool moving_req)
829 struct nrs_orr_data *orrd;
830 struct nrs_orr_object *orro;
831 struct nrs_orr_object *tmp;
832 struct nrs_orr_key key = { { { 0 } } };
837 * struct nrs_orr_data is requested.
839 if (parent == NULL) {
840 *resp = &((struct nrs_orr_data *)policy->pol_private)->od_res;
844 orrd = container_of(parent, struct nrs_orr_data, od_res);
847 * If the request type is not supported, fail the enqueuing; the RPC
848 * will be handled by the fallback NRS policy.
850 if (!nrs_orr_req_supported(orrd, nrq, &opc))
854 * Fill in the key for the request; OST FID for ORR policy instances,
855 * and OST index for TRR policy instances.
857 rc = nrs_orr_key_fill(orrd, nrq, opc, policy->pol_desc->pd_name, &key);
862 * Set the offset range the request covers
864 rc = nrs_orr_range_fill(nrq, orrd, opc, moving_req);
/* Fast path: the object for this key may already exist in the hash. */
868 orro = cfs_hash_lookup(orrd->od_obj_hash, &key);
/* Miss: allocate a new object; GFP_ATOMIC when called under a spinlock
 * (moving_req), GFP_NOFS otherwise. */
872 OBD_SLAB_CPT_ALLOC_PTR_GFP(orro, orrd->od_cache,
873 nrs_pol2cptab(policy), nrs_pol2cptid(policy),
874 moving_req ? GFP_ATOMIC : GFP_NOFS);
/* Insert-or-find: if another thread raced us in, free our copy and use
 * the winner's object. */
881 tmp = cfs_hash_findadd_unique(orrd->od_obj_hash, &orro->oo_key,
884 OBD_SLAB_FREE_PTR(orro, orrd->od_cache);
889 * For debugging purposes
891 nrq->nr_u.orr.or_key = orro->oo_key;
893 *resp = &orro->oo_res;
899 * Called when releasing references to the resource hierarchy obtained for a
900 * request for scheduling using ORR/TRR policy instances
902 * \param[in] policy the policy the resource belongs to
903 * \param[in] res the resource to be released
/*
 * Resource-put hook: releases the leaf nrs_orr_object reference taken by
 * nrs_orr_res_get(); the parent nrs_orr_data resource needs no action.
 */
905 static void nrs_orr_res_put(struct ptlrpc_nrs_policy *policy,
906 const struct ptlrpc_nrs_resource *res)
908 struct nrs_orr_data *orrd;
909 struct nrs_orr_object *orro;
912 * Do nothing for freeing parent, nrs_orr_data resources.
914 if (res->res_parent == NULL)
917 orro = container_of(res, struct nrs_orr_object, oo_res);
918 orrd = container_of(res->res_parent, struct nrs_orr_data, od_res);
/* Drops the hash reference; for ORR this may free the object. */
920 cfs_hash_put(orrd->od_obj_hash, &orro->oo_hnode);
924 * Called when polling an ORR/TRR policy instance for a request so that it can
925 * be served. Returns the request that is at the root of the binary heap, as
926 * that is the lowest priority one (i.e. libcfs_heap is an implementation of a
929 * \param[in] policy the policy instance being polled
930 * \param[in] peek when set, signifies that we just want to examine the
931 * request, and not handle it, so the request is not removed
933 * \param[in] force force the policy to return a request; unused in this policy
935 * \retval the request to be handled
936 * \retval NULL no request available
938 * \see ptlrpc_nrs_req_get_nolock()
939 * \see nrs_request_get()
/*
 * Request-get hook: returns the binheap root (lowest-priority node per
 * orr_req_compare(), i.e. next to dispatch). Unless peeking, the request
 * is removed from the heap, a start-of-handling trace is logged, and the
 * global round counter od_round is advanced to the next root's round.
 */
942 struct ptlrpc_nrs_request *nrs_orr_req_get(struct ptlrpc_nrs_policy *policy,
943 bool peek, bool force)
945 struct nrs_orr_data *orrd = policy->pol_private;
946 struct cfs_binheap_node *node = cfs_binheap_root(orrd->od_binheap);
947 struct ptlrpc_nrs_request *nrq;
949 nrq = unlikely(node == NULL) ? NULL :
950 container_of(node, struct ptlrpc_nrs_request, nr_node);
952 if (likely(!peek && nrq != NULL)) {
953 struct nrs_orr_object *orro;
955 orro = container_of(nrs_request_resource(nrq),
956 struct nrs_orr_object, oo_res);
/* A dispatched request can never be ahead of its object's round. */
958 LASSERT(nrq->nr_u.orr.or_round <= orro->oo_round);
960 cfs_binheap_remove(orrd->od_binheap, &nrq->nr_node);
/* Trace message differs between ORR (includes FID) and TRR. */
963 if (strncmp(policy->pol_desc->pd_name, NRS_POL_NAME_ORR,
964 NRS_POL_NAME_MAX) == 0)
966 "NRS: starting to handle %s request for object "
967 "with FID "DFID", from OST with index %u, with "
968 "round %llu\n", NRS_POL_NAME_ORR,
969 PFID(&orro->oo_key.ok_fid),
970 nrq->nr_u.orr.or_key.ok_idx,
971 nrq->nr_u.orr.or_round);
974 "NRS: starting to handle %s request from OST "
975 "with index %u, with round %llu\n",
976 NRS_POL_NAME_TRR, nrq->nr_u.orr.or_key.ok_idx,
977 nrq->nr_u.orr.or_round);
979 /** Peek at the next request to be served */
980 node = cfs_binheap_root(orrd->od_binheap);
982 /** No more requests */
983 if (unlikely(node == NULL)) {
986 struct ptlrpc_nrs_request *next;
988 next = container_of(node, struct ptlrpc_nrs_request,
/* Advance the scheduler's round to match the new heap root. */
991 if (orrd->od_round < next->nr_u.orr.or_round)
992 orrd->od_round = next->nr_u.orr.or_round;
1000 * Sort-adds request \a nrq to an ORR/TRR \a policy instance's set of queued
1001 * requests in the policy's binary heap.
1003 * A scheduling round is a stream of requests that have been sorted in batches
1004 * according to the backend-fs object (for ORR policy instances) or OST (for TRR
1005 * policy instances) that they pertain to (as identified by its IDIF FID or OST
1006 * index respectively); there can be only one batch for each object or OST in
1007 * each round. The batches are of maximum size nrs_orr_data:od_quantum. When a
1008 * new request arrives for scheduling for an object or OST that has exhausted
1009 * its quantum in its current round, the request will be scheduled on the next
1010 * scheduling round. Requests are allowed to be scheduled against a round until
1011 * all requests for the round are serviced, so an object or OST might miss a
1012 * round if requests are not scheduled for it for a long enough period of time.
1013 * Objects or OSTs that miss a round will continue with having their next
1014 * request scheduled, starting at the round that requests are being dispatched
1015 * for, at the time of arrival of this request.
1017 * Requests are tagged with the round number and a sequence number; the sequence
1018 * number indicates the relative ordering amongst the batches of requests in a
1019 * round, and is identical for all requests in a batch, as is the round number.
1020 * The round and sequence numbers are used by orr_req_compare() in order to use
1021 * nrs_orr_data::od_binheap in order to maintain an ordered set of rounds, with
1022 * each round consisting of an ordered set of batches of requests, and each
1023 * batch consisting of an ordered set of requests according to their logical
1024 * file or physical disk offsets.
1026 * \param[in] policy the policy
1027 * \param[in] nrq the request to add
1029 * \retval 0 request successfully added
1030 * \retval != 0 error
/*
 * Request-add hook: assigns \a nrq a round and batch sequence number for
 * its object/OST, starting a fresh round/batch when the object's quantum
 * is exhausted, it has fallen behind the global round, or it went idle
 * with quantum left over; then sort-inserts the request into the binheap.
 */
1032 static int nrs_orr_req_add(struct ptlrpc_nrs_policy *policy,
1033 struct ptlrpc_nrs_request *nrq)
1035 struct nrs_orr_data *orrd;
1036 struct nrs_orr_object *orro;
1039 orro = container_of(nrs_request_resource(nrq),
1040 struct nrs_orr_object, oo_res);
1041 orrd = container_of(nrs_request_resource(nrq)->res_parent,
1042 struct nrs_orr_data, od_res);
/* Does this request need to open a new batch (and possibly round)? */
1044 if (orro->oo_quantum == 0 || orro->oo_round < orrd->od_round ||
1045 (orro->oo_active == 0 && orro->oo_quantum > 0)) {
1048 * If there are no pending requests for the object/OST, but some
1049 * of its quantum still remains unused, which implies we did not
1050 * get a chance to schedule up to its maximum allowed batch size
1051 * of requests in the previous round this object/OST
1052 * participated in, schedule this next request on a new round;
1053 * this avoids fragmentation of request batches caused by
1054 * intermittent inactivity on the object/OST, at the expense of
1055 * potentially slightly increased service time for the request
1056 * batch this request will be a part of.
1058 if (orro->oo_active == 0 && orro->oo_quantum > 0)
1061 /** A new scheduling round has commenced */
1062 if (orro->oo_round < orrd->od_round)
1063 orro->oo_round = orrd->od_round;
1065 /** I was not the last object/OST that scheduled a request */
1066 if (orro->oo_sequence < orrd->od_sequence)
1067 orro->oo_sequence = ++orrd->od_sequence;
1069 * Reset the quantum if we have reached the maximum quantum
1070 * size for this batch, or even if we have not managed to
1071 * complete a batch size up to its maximum allowed size.
1072 * XXX: Accessed unlocked
1074 orro->oo_quantum = orrd->od_quantum;
/* Tag the request so orr_req_compare() can order it in the heap. */
1077 nrq->nr_u.orr.or_round = orro->oo_round;
1078 nrq->nr_u.orr.or_sequence = orro->oo_sequence;
1080 rc = cfs_binheap_insert(orrd->od_binheap, &nrq->nr_node);
/* Consuming the last unit of quantum ends this object's batch. */
1083 if (--orro->oo_quantum == 0)
1090 * Removes request \a nrq from an ORR/TRR \a policy instance's set of queued
1093 * \param[in] policy the policy
1094 * \param[in] nrq the request to remove
/*
 * Request-del hook: removes \a nrq from the binheap; if it was the heap
 * root, the global round counter is advanced to the new root's round,
 * mirroring the bookkeeping in nrs_orr_req_get().
 */
1096 static void nrs_orr_req_del(struct ptlrpc_nrs_policy *policy,
1097 struct ptlrpc_nrs_request *nrq)
1099 struct nrs_orr_data *orrd;
1100 struct nrs_orr_object *orro;
1103 orro = container_of(nrs_request_resource(nrq),
1104 struct nrs_orr_object, oo_res);
1105 orrd = container_of(nrs_request_resource(nrq)->res_parent,
1106 struct nrs_orr_data, od_res);
1108 LASSERT(nrq->nr_u.orr.or_round <= orro->oo_round);
/* Remember whether the node being removed is the heap root. */
1110 is_root = &nrq->nr_node == cfs_binheap_root(orrd->od_binheap);
1112 cfs_binheap_remove(orrd->od_binheap, &nrq->nr_node);
1116 * If we just deleted the node at the root of the binheap, we may have
1117 * to adjust round numbers.
1119 if (unlikely(is_root)) {
1120 /** Peek at the next request to be served */
1121 struct cfs_binheap_node *node = cfs_binheap_root(orrd->od_binheap);
1123 /** No more requests */
1124 if (unlikely(node == NULL)) {
1127 nrq = container_of(node, struct ptlrpc_nrs_request,
1130 if (orrd->od_round < nrq->nr_u.orr.or_round)
1131 orrd->od_round = nrq->nr_u.orr.or_round;
1137 * Called right after the request \a nrq finishes being handled by ORR policy
1138 * instance \a policy.
1140 * \param[in] policy the policy that handled the request
1141 * \param[in] nrq the request that was handled
/*
 * Request-stop hook: purely informational — logs a finished-handling
 * trace, with the ORR variant including the object FID.
 */
1143 static void nrs_orr_req_stop(struct ptlrpc_nrs_policy *policy,
1144 struct ptlrpc_nrs_request *nrq)
1146 /** NB: resource control, credits etc can be added here */
1147 if (strncmp(policy->pol_desc->pd_name, NRS_POL_NAME_ORR,
1148 NRS_POL_NAME_MAX) == 0)
1150 "NRS: finished handling %s request for object with FID "
1151 DFID", from OST with index %u, with round %llu\n",
1152 NRS_POL_NAME_ORR, PFID(&nrq->nr_u.orr.or_key.ok_fid),
1153 nrq->nr_u.orr.or_key.ok_idx, nrq->nr_u.orr.or_round);
1156 "NRS: finished handling %s request from OST with index %u,"
1157 " with round %llu\n",
1158 NRS_POL_NAME_TRR, nrq->nr_u.orr.or_key.ok_idx,
1159 nrq->nr_u.orr.or_round);
1166 #ifdef CONFIG_PROC_FS
1169 * This allows to bundle the policy name into the lprocfs_vars::data pointer
1170 * so that lprocfs read/write functions can be used by both the ORR and TRR
/*
 * Bundles the service pointer and the policy name so the same lprocfs
 * read/write handlers serve both ORR and TRR instances via vars::data.
 *
 * NOTE(review): the member declaring the policy name (presumably
 * 'char *name;') and the closing '};' were dropped by extraction.
 */
1173 static struct nrs_lprocfs_orr_data {
1174 struct ptlrpc_service *svc;
1176 } lprocfs_orr_data = {
1177 .name = NRS_POL_NAME_ORR
1178 }, lprocfs_trr_data = {
1179 .name = NRS_POL_NAME_TRR
1183 * Retrieves the value of the Round Robin quantum (i.e. the maximum batch size)
1184 * for ORR/TRR policy instances on both the regular and high-priority NRS head
1185 * of a service, as long as a policy instance is not in the
1186 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state; policy instances in this
1187 * state are skipped later by nrs_orr_ctl().
1189 * Quantum values are in # of RPCs, and the output is in YAML format.
1196 * XXX: the CRR-N version of this, ptlrpc_lprocfs_rd_nrs_crrn_quantum() is
1197 * almost identical; it can be reworked and then reused for ORR/TRR.
/**
 * seq_file show handler: reports the RR quantum for the regular and (if
 * present) HP NRS heads in YAML form; -ENODEV from a stopped policy
 * instance is deliberately ignored.
 *
 * NOTE(review): extraction dropped lines (return-type line, braces, local
 * declarations such as 'quantum' and 'rc', control arguments, and return
 * paths) — restore from pristine source before building.
 */
1200 ptlrpc_lprocfs_nrs_orr_quantum_seq_show(struct seq_file *m, void *data)
1202 struct nrs_lprocfs_orr_data *orr_data = m->private;
1203 struct ptlrpc_service *svc = orr_data->svc;
1208 * Perform two separate calls to this as only one of the NRS heads'
1209 * policies may be in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
1210 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING state.
1212 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1214 NRS_CTL_ORR_RD_QUANTUM,
1217 seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_REG "%-5d\n", quantum);
1219 * Ignore -ENODEV as the regular NRS head's policy may be in the
1220 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1222 } else if (rc != -ENODEV) {
1227 * We know the ost_io service which is the only one ORR/TRR policies are
1228 * compatible with, do have an HP NRS head, but it may be best to guard
1229 * against a possible change of this in the future.
1231 if (!nrs_svc_has_hp(svc))
1234 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1235 orr_data->name, NRS_CTL_ORR_RD_QUANTUM,
1238 seq_printf(m, NRS_LPROCFS_QUANTUM_NAME_HP"%-5d\n", quantum);
1240 * Ignore -ENODEV as the high priority NRS head's policy may be
1241 * in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1243 } else if (rc != -ENODEV) {
1253 * Sets the value of the Round Robin quantum (i.e. the maximum batch size)
1254 * for ORR/TRR policy instances of a service. The user can set the quantum size
1255 * for the regular and high priority NRS head separately by specifying each
1256 * value, or both together in a single invocation.
1260 * lctl set_param ost.OSS.ost_io.nrs_orr_quantum=req_quantum:64, to set the
1261 * request quantum size of the ORR policy instance on the regular NRS head of
1262 * the ost_io service to 64
1264 * lctl set_param ost.OSS.ost_io.nrs_trr_quantum=hp_quantum:8 to set the request
1265 * quantum size of the TRR policy instance on the high priority NRS head of the
1266 * ost_io service to 8
1268 * lctl set_param ost.OSS.ost_io.nrs_orr_quantum=32, to set both the request
1269 * quantum size of the ORR policy instance on both the regular and the high
1270 * priority NRS head of the ost_io service to 32
1272 * policy instances in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state
1273 * are skipped later by nrs_orr_ctl().
1275 * XXX: the CRR-N version of this, ptlrpc_lprocfs_wr_nrs_crrn_quantum() is
1276 * almost identical; it can be reworked and then reused for ORR/TRR.
/**
 * seq_file write handler: parses "reg_quantum:N", "hp_quantum:N" or a bare
 * number from userspace and applies the quantum to the selected NRS
 * head(s); returns -ENODEV only if every addressed head reports -ENODEV.
 *
 * NOTE(review): extraction dropped lines here (return-type line, braces,
 * declarations of 'val'/'quantum_reg'/'quantum_hp'/'rc'/'rc2', the size
 * arguments to lprocfs_find_named_value(), the error-return statements,
 * and policy-name arguments to ptlrpc_nrs_policy_control()) — restore
 * from pristine source before building.
 */
1279 ptlrpc_lprocfs_nrs_orr_quantum_seq_write(struct file *file,
1280 const char __user *buffer,
1281 size_t count, loff_t *off)
1283 struct seq_file *m = file->private_data;
1284 struct nrs_lprocfs_orr_data *orr_data = m->private;
1285 struct ptlrpc_service *svc = orr_data->svc;
1286 enum ptlrpc_nrs_queue_type queue = 0;
1287 char kernbuf[LPROCFS_NRS_WR_QUANTUM_MAX_CMD];
1291 /** lprocfs_find_named_value() modifies its argument, so keep a copy */
/* Reject writes that would not fit the fixed-size kernel buffer */
1296 if (count > (sizeof(kernbuf) - 1))
1299 if (copy_from_user(kernbuf, buffer, count))
1302 kernbuf[count] = '\0';
1307 * Check if the regular quantum value has been specified
1309 val = lprocfs_find_named_value(kernbuf, NRS_LPROCFS_QUANTUM_NAME_REG,
1311 if (val != kernbuf) {
1312 quantum_reg = simple_strtol(val, NULL, 10);
1314 queue |= PTLRPC_NRS_QUEUE_REG;
1320 * Check if the high priority quantum value has been specified
1322 val = lprocfs_find_named_value(kernbuf, NRS_LPROCFS_QUANTUM_NAME_HP,
1324 if (val != kernbuf) {
1325 if (!nrs_svc_has_hp(svc))
1328 quantum_hp = simple_strtol(val, NULL, 10);
1330 queue |= PTLRPC_NRS_QUEUE_HP;
1334 * If none of the queues has been specified, look for a valid numerical
/* Bare number: applies to regular head, and to HP head when present */
1338 if (!isdigit(kernbuf[0]))
1341 quantum_reg = simple_strtol(kernbuf, NULL, 10);
1343 queue = PTLRPC_NRS_QUEUE_REG;
1345 if (nrs_svc_has_hp(svc)) {
1346 queue |= PTLRPC_NRS_QUEUE_HP;
1347 quantum_hp = quantum_reg;
/* Range-check each quantum that was actually specified */
1351 if ((((queue & PTLRPC_NRS_QUEUE_REG) != 0) &&
1352 ((quantum_reg > LPROCFS_NRS_QUANTUM_MAX || quantum_reg <= 0))) ||
1353 (((queue & PTLRPC_NRS_QUEUE_HP) != 0) &&
1354 ((quantum_hp > LPROCFS_NRS_QUANTUM_MAX || quantum_hp <= 0))))
1358 * We change the values on regular and HP NRS heads separately, so that
1359 * we do not exit early from ptlrpc_nrs_policy_control() with an error
1360 * returned by nrs_policy_ctl_locked(), in cases where the user has not
1361 * started the policy on either the regular or HP NRS head; i.e. we are
1362 * ignoring -ENODEV within nrs_policy_ctl_locked(). -ENODEV is returned
1363 * only if the operation fails with -ENODEV on all heads that have been
1364 * specified by the command; if at least one operation succeeds,
1365 * success is returned.
1367 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
1368 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1370 NRS_CTL_ORR_WR_QUANTUM, false,
1372 if ((rc < 0 && rc != -ENODEV) ||
1373 (rc == -ENODEV && queue == PTLRPC_NRS_QUEUE_REG))
1377 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
1378 rc2 = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1380 NRS_CTL_ORR_WR_QUANTUM, false,
1382 if ((rc2 < 0 && rc2 != -ENODEV) ||
1383 (rc2 == -ENODEV && queue == PTLRPC_NRS_QUEUE_HP))
/* -ENODEV only if every addressed head failed with -ENODEV */
1387 return rc == -ENODEV && rc2 == -ENODEV ? -ENODEV : count;
1389 LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_orr_quantum);
1391 #define LPROCFS_NRS_OFF_NAME_REG "reg_offset_type:"
1392 #define LPROCFS_NRS_OFF_NAME_HP "hp_offset_type:"
1394 #define LPROCFS_NRS_OFF_NAME_PHYSICAL "physical"
1395 #define LPROCFS_NRS_OFF_NAME_LOGICAL "logical"
1398 * Retrieves the offset type used by ORR/TRR policy instances on both the
1399 * regular and high-priority NRS head of a service, as long as a policy
1400 * instance is not in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state;
1401 * policy instances in this state are skipped later by nrs_orr_ctl().
1403 * Offset type information is a (physical|logical) string, and output is
1408 * reg_offset_type:physical
1409 * hp_offset_type:logical
/**
 * seq_file show handler: reports whether each NRS head orders RPCs by
 * physical or logical offset, in YAML form; -ENODEV from a stopped
 * policy instance is ignored.
 *
 * NOTE(review): extraction dropped lines (return-type line, braces,
 * declarations such as 'physical' and 'rc', control arguments, and
 * return paths) — restore from pristine source before building.
 */
1412 ptlrpc_lprocfs_nrs_orr_offset_type_seq_show(struct seq_file *m, void *data)
1414 struct nrs_lprocfs_orr_data *orr_data = m->private;
1415 struct ptlrpc_service *svc = orr_data->svc;
1420 * Perform two separate calls to this as only one of the NRS heads'
1421 * policies may be in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED
1422 * or ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING state.
1424 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1425 orr_data->name, NRS_CTL_ORR_RD_OFF_TYPE,
1428 seq_printf(m, LPROCFS_NRS_OFF_NAME_REG"%s\n",
1429 physical ? LPROCFS_NRS_OFF_NAME_PHYSICAL :
1430 LPROCFS_NRS_OFF_NAME_LOGICAL);
1432 * Ignore -ENODEV as the regular NRS head's policy may be in the
1433 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1435 } else if (rc != -ENODEV) {
1440 * We know the ost_io service which is the only one ORR/TRR policies are
1441 * compatible with, do have an HP NRS head, but it may be best to guard
1442 * against a possible change of this in the future.
1444 if (!nrs_svc_has_hp(svc))
1447 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1448 orr_data->name, NRS_CTL_ORR_RD_OFF_TYPE,
1451 seq_printf(m, LPROCFS_NRS_OFF_NAME_HP"%s\n",
1452 physical ? LPROCFS_NRS_OFF_NAME_PHYSICAL :
1453 LPROCFS_NRS_OFF_NAME_LOGICAL);
1455 * Ignore -ENODEV as the high priority NRS head's policy may be
1456 * in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1458 } else if (rc != -ENODEV) {
1467 * Max valid command string is the size of the labels, plus "physical" twice,
1468 * plus a separating ' '.
1470 #define LPROCFS_NRS_WR_OFF_TYPE_MAX_CMD \
1471 sizeof(LPROCFS_NRS_OFF_NAME_REG LPROCFS_NRS_OFF_NAME_PHYSICAL " " \
1472 LPROCFS_NRS_OFF_NAME_HP LPROCFS_NRS_OFF_NAME_PHYSICAL)
1475 * Sets the type of offsets used to order RPCs in ORR/TRR policy instances. The
1476 * user can set offset type for the regular or high priority NRS head
1477 * separately by specifying each value, or both together in a single invocation.
1481 * lctl set_param ost.OSS.ost_io.nrs_orr_offset_type=
1482 * reg_offset_type:physical, to enable the ORR policy instance on the regular
1483 * NRS head of the ost_io service to use physical disk offset ordering.
1485 * lctl set_param ost.OSS.ost_io.nrs_trr_offset_type=logical, to enable the TRR
1486 * policy instances on both the regular and high priority NRS heads of the
1487 * ost_io service to use logical file offset ordering.
1489 * policy instances in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state
1490 * are skipped later by nrs_orr_ctl().
/**
 * seq_file write handler: parses "reg_offset_type:(physical|logical)",
 * "hp_offset_type:..." or a bare value, and applies the offset-ordering
 * type to the selected NRS head(s); -ENODEV is returned only if every
 * addressed head reports -ENODEV.
 *
 * NOTE(review): extraction dropped lines here (return-type line, braces,
 * declarations of 'val_reg'/'val_hp'/'physical_reg'/'physical_hp'/'rc'/
 * 'rc2', the 'count' parameter line, the 'queue == 0' test before the
 * fallback path, the error-return 'else' arms, and policy-name arguments)
 * — restore from pristine source before building.
 */
1493 ptlrpc_lprocfs_nrs_orr_offset_type_seq_write(struct file *file,
1494 const char __user *buffer,
1498 struct seq_file *m = file->private_data;
1499 struct nrs_lprocfs_orr_data *orr_data = m->private;
1500 struct ptlrpc_service *svc = orr_data->svc;
1501 enum ptlrpc_nrs_queue_type queue = 0;
1502 char kernbuf[LPROCFS_NRS_WR_OFF_TYPE_MAX_CMD];
/* Bound, copy and NUL-terminate the userspace command string */
1511 if (count > (sizeof(kernbuf) - 1))
1514 if (copy_from_user(kernbuf, buffer, count))
1517 kernbuf[count] = '\0';
1522 * Check if the regular offset type has been specified
1524 val_reg = lprocfs_find_named_value(kernbuf,
1525 LPROCFS_NRS_OFF_NAME_REG,
1527 if (val_reg != kernbuf)
1528 queue |= PTLRPC_NRS_QUEUE_REG;
1533 * Check if the high priority offset type has been specified
1535 val_hp = lprocfs_find_named_value(kernbuf, LPROCFS_NRS_OFF_NAME_HP,
1537 if (val_hp != kernbuf) {
1538 if (!nrs_svc_has_hp(svc))
1541 queue |= PTLRPC_NRS_QUEUE_HP;
1545 * If none of the queues has been specified, there may be a valid
1546 * command string at the start of the buffer.
1549 queue = PTLRPC_NRS_QUEUE_REG;
1551 if (nrs_svc_has_hp(svc))
1552 queue |= PTLRPC_NRS_QUEUE_HP;
/* Translate the per-head value strings into booleans */
1555 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
1556 if (strncmp(val_reg, LPROCFS_NRS_OFF_NAME_PHYSICAL,
1557 sizeof(LPROCFS_NRS_OFF_NAME_PHYSICAL) - 1) == 0)
1558 physical_reg = true;
1559 else if (strncmp(val_reg, LPROCFS_NRS_OFF_NAME_LOGICAL,
1560 sizeof(LPROCFS_NRS_OFF_NAME_LOGICAL) - 1) == 0)
1561 physical_reg = false;
1566 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
1567 if (strncmp(val_hp, LPROCFS_NRS_OFF_NAME_PHYSICAL,
1568 sizeof(LPROCFS_NRS_OFF_NAME_PHYSICAL) - 1) == 0)
1570 else if (strncmp(val_hp, LPROCFS_NRS_OFF_NAME_LOGICAL,
1571 sizeof(LPROCFS_NRS_OFF_NAME_LOGICAL) - 1) == 0)
1572 physical_hp = false;
1578 * We change the values on regular and HP NRS heads separately, so that
1579 * we do not exit early from ptlrpc_nrs_policy_control() with an error
1580 * returned by nrs_policy_ctl_locked(), in cases where the user has not
1581 * started the policy on either the regular or HP NRS head; i.e. we are
1582 * ignoring -ENODEV within nrs_policy_ctl_locked(). -ENODEV is returned
1583 * only if the operation fails with -ENODEV on all heads that have been
1584 * specified by the command; if at least one operation succeeds,
1585 * success is returned.
1587 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
1588 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1590 NRS_CTL_ORR_WR_OFF_TYPE, false,
1592 if ((rc < 0 && rc != -ENODEV) ||
1593 (rc == -ENODEV && queue == PTLRPC_NRS_QUEUE_REG))
1597 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
1598 rc2 = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1600 NRS_CTL_ORR_WR_OFF_TYPE, false,
1602 if ((rc2 < 0 && rc2 != -ENODEV) ||
1603 (rc2 == -ENODEV && queue == PTLRPC_NRS_QUEUE_HP))
/* -ENODEV only if every addressed head failed with -ENODEV */
1607 return rc == -ENODEV && rc2 == -ENODEV ? -ENODEV : count;
1609 LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_orr_offset_type);
1611 #define NRS_LPROCFS_REQ_SUPP_NAME_REG "reg_supported:"
1612 #define NRS_LPROCFS_REQ_SUPP_NAME_HP "hp_supported:"
1614 #define LPROCFS_NRS_SUPP_NAME_READS "reads"
1615 #define LPROCFS_NRS_SUPP_NAME_WRITES "writes"
1616 #define LPROCFS_NRS_SUPP_NAME_READWRITES "reads_and_writes"
1619 * Translates enum nrs_orr_supp values to a corresponding string.
/**
 * Maps an enum nrs_orr_supp value to its lprocfs string.
 *
 * NOTE(review): the switch statement scaffolding (braces, 'switch (supp)',
 * 'case' labels, and presumably a default/LBUG path) was dropped by
 * extraction — restore from pristine source before building.
 */
1621 static const char *nrs_orr_supp2str(enum nrs_orr_supp supp)
1627 return LPROCFS_NRS_SUPP_NAME_READS;
1629 return LPROCFS_NRS_SUPP_NAME_WRITES;
1631 return LPROCFS_NRS_SUPP_NAME_READWRITES;
1636 * Translates strings to the corresponding enum nrs_orr_supp value
/**
 * Maps an lprocfs string to the corresponding enum nrs_orr_supp value.
 * "reads_and_writes" is matched first since "reads" is its prefix.
 *
 * NOTE(review): extraction dropped lines (function braces, the
 * 'return NOS_OST_RW;' arm, and the trailing error return — presumably
 * -EINVAL, given callers compare against it) — restore from pristine
 * source before building.
 */
1638 static enum nrs_orr_supp nrs_orr_str2supp(const char *val)
1640 if (strncmp(val, LPROCFS_NRS_SUPP_NAME_READWRITES,
1641 sizeof(LPROCFS_NRS_SUPP_NAME_READWRITES) - 1) == 0)
1643 else if (strncmp(val, LPROCFS_NRS_SUPP_NAME_READS,
1644 sizeof(LPROCFS_NRS_SUPP_NAME_READS) - 1) == 0)
1645 return NOS_OST_READ;
1646 else if (strncmp(val, LPROCFS_NRS_SUPP_NAME_WRITES,
1647 sizeof(LPROCFS_NRS_SUPP_NAME_WRITES) - 1) == 0)
1648 return NOS_OST_WRITE;
1654 * Retrieves the type of RPCs handled at the point of invocation by ORR/TRR
1655 * policy instances on both the regular and high-priority NRS head of a service,
1656 * as long as a policy instance is not in the
1657 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state; policy instances in this
1658 * state are skipped later by nrs_orr_ctl().
1660 * Supported RPC type information is a (reads|writes|reads_and_writes) string,
1661 * and output is in YAML format.
1665 * reg_supported:reads
1666 * hp_supported:reads_and_writes
/**
 * seq_file show handler: reports which RPC types (reads/writes/both) each
 * NRS head's ORR/TRR instance currently handles, in YAML form; -ENODEV
 * from a stopped policy instance is ignored.
 *
 * NOTE(review): extraction dropped lines (return-type line, braces, the
 * 'rc' declaration, control arguments, and return paths) — restore from
 * pristine source before building.
 */
1669 ptlrpc_lprocfs_nrs_orr_supported_seq_show(struct seq_file *m, void *data)
1671 struct nrs_lprocfs_orr_data *orr_data = m->private;
1672 struct ptlrpc_service *svc = orr_data->svc;
1673 enum nrs_orr_supp supported;
1677 * Perform two separate calls to this as only one of the NRS heads'
1678 * policies may be in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED
1679 * or ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING state.
1681 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1683 NRS_CTL_ORR_RD_SUPP_REQ, true,
1687 seq_printf(m, NRS_LPROCFS_REQ_SUPP_NAME_REG"%s\n",
1688 nrs_orr_supp2str(supported));
1690 * Ignore -ENODEV as the regular NRS head's policy may be in the
1691 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1693 } else if (rc != -ENODEV) {
1698 * We know the ost_io service which is the only one ORR/TRR policies are
1699 * compatible with, do have an HP NRS head, but it may be best to guard
1700 * against a possible change of this in the future.
1702 if (!nrs_svc_has_hp(svc))
1705 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1707 NRS_CTL_ORR_RD_SUPP_REQ, true,
1710 seq_printf(m, NRS_LPROCFS_REQ_SUPP_NAME_HP"%s\n",
1711 nrs_orr_supp2str(supported));
1713 * Ignore -ENODEV as the high priority NRS head's policy may be
1714 * in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state.
1716 } else if (rc != -ENODEV) {
1726 * Max valid command string is the size of the labels, plus "reads_and_writes"
1727 * twice, plus a separating ' '
1729 #define LPROCFS_NRS_WR_REQ_SUPP_MAX_CMD \
1730 sizeof(NRS_LPROCFS_REQ_SUPP_NAME_REG LPROCFS_NRS_SUPP_NAME_READWRITES \
1731 NRS_LPROCFS_REQ_SUPP_NAME_HP LPROCFS_NRS_SUPP_NAME_READWRITES \
1735 * Sets the type of RPCs handled by ORR/TRR policy instances. The user can
1736 * modify this setting for the regular or high priority NRS heads separately, or
1737 * both together in a single invocation.
1741 * lctl set_param ost.OSS.ost_io.nrs_orr_supported=
1742 * "reg_supported:reads", to enable the ORR policy instance on the regular NRS
1743 * head of the ost_io service to handle OST_READ RPCs.
1745 * lctl set_param ost.OSS.ost_io.nrs_trr_supported=reads_and_writes, to enable
1746 * the TRR policy instances on both the regular and high priority NRS heads of
1747 * the ost_io service to handle OST_READ and OST_WRITE RPCs.
1749 * policy instances in the ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED state
1750 * are skipped later by nrs_orr_ctl().
/**
 * seq_file write handler: parses "reg_supported:...", "hp_supported:..."
 * or a bare (reads|writes|reads_and_writes) value and applies the
 * supported-RPC-type setting to the selected NRS head(s); -ENODEV is
 * returned only if every addressed head reports -ENODEV.
 *
 * NOTE(review): extraction dropped lines here (return-type line, braces,
 * the 'count' parameter line, declarations of 'val_reg'/'val_hp'/'rc'/
 * 'rc2', size arguments, the 'queue == 0' test before the fallback path,
 * error-return statements, and policy-name arguments) — restore from
 * pristine source before building.
 */
1753 ptlrpc_lprocfs_nrs_orr_supported_seq_write(struct file *file,
1754 const char __user *buffer,
1758 struct seq_file *m = file->private_data;
1759 struct nrs_lprocfs_orr_data *orr_data = m->private;
1760 struct ptlrpc_service *svc = orr_data->svc;
1761 enum ptlrpc_nrs_queue_type queue = 0;
1762 char kernbuf[LPROCFS_NRS_WR_REQ_SUPP_MAX_CMD];
1765 enum nrs_orr_supp supp_reg;
1766 enum nrs_orr_supp supp_hp;
/* Bound, copy and NUL-terminate the userspace command string */
1771 if (count > (sizeof(kernbuf) - 1))
1774 if (copy_from_user(kernbuf, buffer, count))
1777 kernbuf[count] = '\0';
1782 * Check if the regular supported requests setting has been specified
1784 val_reg = lprocfs_find_named_value(kernbuf,
1785 NRS_LPROCFS_REQ_SUPP_NAME_REG,
1787 if (val_reg != kernbuf)
1788 queue |= PTLRPC_NRS_QUEUE_REG;
1793 * Check if the high priority supported requests setting has been
1796 val_hp = lprocfs_find_named_value(kernbuf, NRS_LPROCFS_REQ_SUPP_NAME_HP,
1798 if (val_hp != kernbuf) {
1799 if (!nrs_svc_has_hp(svc))
1802 queue |= PTLRPC_NRS_QUEUE_HP;
1806 * If none of the queues has been specified, there may be a valid
1807 * command string at the start of the buffer.
1810 queue = PTLRPC_NRS_QUEUE_REG;
1812 if (nrs_svc_has_hp(svc))
1813 queue |= PTLRPC_NRS_QUEUE_HP;
/* Translate the per-head value strings into enum nrs_orr_supp values */
1816 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
1817 supp_reg = nrs_orr_str2supp(val_reg);
1818 if (supp_reg == -EINVAL)
1822 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
1823 supp_hp = nrs_orr_str2supp(val_hp);
1824 if (supp_hp == -EINVAL)
1829 * We change the values on regular and HP NRS heads separately, so that
1830 * we do not exit early from ptlrpc_nrs_policy_control() with an error
1831 * returned by nrs_policy_ctl_locked(), in cases where the user has not
1832 * started the policy on either the regular or HP NRS head; i.e. we are
1833 * ignoring -ENODEV within nrs_policy_ctl_locked(). -ENODEV is returned
1834 * only if the operation fails with -ENODEV on all heads that have been
1835 * specified by the command; if at least one operation succeeds,
1836 * success is returned.
1838 if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
1839 rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_REG,
1841 NRS_CTL_ORR_WR_SUPP_REQ, false,
1843 if ((rc < 0 && rc != -ENODEV) ||
1844 (rc == -ENODEV && queue == PTLRPC_NRS_QUEUE_REG))
1848 if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
1849 rc2 = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_HP,
1851 NRS_CTL_ORR_WR_SUPP_REQ, false,
1853 if ((rc2 < 0 && rc2 != -ENODEV) ||
1854 (rc2 == -ENODEV && queue == PTLRPC_NRS_QUEUE_HP))
/* -ENODEV only if every addressed head failed with -ENODEV */
1858 return rc == -ENODEV && rc2 == -ENODEV ? -ENODEV : count;
1860 LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_orr_supported);
/**
 * Registers the ORR policy's lprocfs tunables (quantum, offset_type,
 * supported) under the service's proc root; no-op when the service has
 * no proc root.
 *
 * NOTE(review): extraction dropped lines (function braces, the loop
 * index declaration, the vars-array NULL terminator, and the early
 * 'return 0;' for a NULL proc root) — restore from pristine source.
 */
1862 static int nrs_orr_lprocfs_init(struct ptlrpc_service *svc)
1866 struct lprocfs_vars nrs_orr_lprocfs_vars[] = {
1867 { .name = "nrs_orr_quantum",
1868 .fops = &ptlrpc_lprocfs_nrs_orr_quantum_fops },
1869 { .name = "nrs_orr_offset_type",
1870 .fops = &ptlrpc_lprocfs_nrs_orr_offset_type_fops },
1871 { .name = "nrs_orr_supported",
1872 .fops = &ptlrpc_lprocfs_nrs_orr_supported_fops },
1876 if (svc->srv_procroot == NULL)
/* Bind this service into the shared ORR lprocfs data for the handlers */
1879 lprocfs_orr_data.svc = svc;
1881 for (i = 0; i < ARRAY_SIZE(nrs_orr_lprocfs_vars); i++)
1882 nrs_orr_lprocfs_vars[i].data = &lprocfs_orr_data;
1884 return lprocfs_add_vars(svc->srv_procroot, nrs_orr_lprocfs_vars, NULL);
/**
 * Unregisters the ORR policy's lprocfs tunables; mirror of
 * nrs_orr_lprocfs_init().
 *
 * NOTE(review): function braces and the early 'return;' were dropped by
 * extraction — restore from pristine source before building.
 */
1887 static void nrs_orr_lprocfs_fini(struct ptlrpc_service *svc)
1889 if (svc->srv_procroot == NULL)
1892 lprocfs_remove_proc_entry("nrs_orr_quantum", svc->srv_procroot);
1893 lprocfs_remove_proc_entry("nrs_orr_offset_type", svc->srv_procroot);
1894 lprocfs_remove_proc_entry("nrs_orr_supported", svc->srv_procroot);
1897 #endif /* CONFIG_PROC_FS */
/**
 * Operations table for the ORR policy.
 *
 * NOTE(review): the closing '#endif' and '};' were dropped by extraction.
 */
1899 static const struct ptlrpc_nrs_pol_ops nrs_orr_ops = {
1900 .op_policy_init = nrs_orr_init,
1901 .op_policy_start = nrs_orr_start,
1902 .op_policy_stop = nrs_orr_stop,
1903 .op_policy_ctl = nrs_orr_ctl,
1904 .op_res_get = nrs_orr_res_get,
1905 .op_res_put = nrs_orr_res_put,
1906 .op_req_get = nrs_orr_req_get,
1907 .op_req_enqueue = nrs_orr_req_add,
1908 .op_req_dequeue = nrs_orr_req_del,
1909 .op_req_stop = nrs_orr_req_stop,
1910 #ifdef CONFIG_PROC_FS
1911 .op_lprocfs_init = nrs_orr_lprocfs_init,
1912 .op_lprocfs_fini = nrs_orr_lprocfs_fini,
/**
 * ORR policy configuration; compatible only with the ost_io service.
 *
 * NOTE(review): the closing '};' was dropped by extraction.
 */
1916 struct ptlrpc_nrs_pol_conf nrs_conf_orr = {
1917 .nc_name = NRS_POL_NAME_ORR,
1918 .nc_ops = &nrs_orr_ops,
1919 .nc_compat = nrs_policy_compat_one,
1920 .nc_compat_svc_name = "ost_io",
1924 * TRR, Target-based Round Robin policy
1926 * TRR reuses much of the functions and data structures of ORR
1929 #ifdef CONFIG_PROC_FS
/**
 * Registers the TRR policy's lprocfs tunables; identical in structure to
 * nrs_orr_lprocfs_init() but publishes "nrs_trr_*" entries backed by
 * lprocfs_trr_data (the handlers are shared with ORR).
 *
 * NOTE(review): extraction dropped lines (function braces, the loop
 * index declaration, the vars-array NULL terminator, and the early
 * 'return 0;' for a NULL proc root) — restore from pristine source.
 */
1931 static int nrs_trr_lprocfs_init(struct ptlrpc_service *svc)
1935 struct lprocfs_vars nrs_trr_lprocfs_vars[] = {
1936 { .name = "nrs_trr_quantum",
1937 .fops = &ptlrpc_lprocfs_nrs_orr_quantum_fops },
1938 { .name = "nrs_trr_offset_type",
1939 .fops = &ptlrpc_lprocfs_nrs_orr_offset_type_fops },
1940 { .name = "nrs_trr_supported",
1941 .fops = &ptlrpc_lprocfs_nrs_orr_supported_fops },
1945 if (svc->srv_procroot == NULL)
/* Bind this service into the shared TRR lprocfs data for the handlers */
1948 lprocfs_trr_data.svc = svc;
1950 for (i = 0; i < ARRAY_SIZE(nrs_trr_lprocfs_vars); i++)
1951 nrs_trr_lprocfs_vars[i].data = &lprocfs_trr_data;
1953 return lprocfs_add_vars(svc->srv_procroot, nrs_trr_lprocfs_vars, NULL);
/**
 * Unregisters the TRR policy's lprocfs tunables; mirror of
 * nrs_trr_lprocfs_init().
 *
 * NOTE(review): function braces and the early 'return;' were dropped by
 * extraction — restore from pristine source before building.
 */
1956 static void nrs_trr_lprocfs_fini(struct ptlrpc_service *svc)
1958 if (svc->srv_procroot == NULL)
1961 lprocfs_remove_proc_entry("nrs_trr_quantum", svc->srv_procroot);
1962 lprocfs_remove_proc_entry("nrs_trr_offset_type", svc->srv_procroot);
1963 lprocfs_remove_proc_entry("nrs_trr_supported", svc->srv_procroot);
1966 #endif /* CONFIG_PROC_FS */
1969 * Reuse much of the ORR functionality for TRR.
/**
 * Operations table for the TRR policy — deliberately reuses all ORR
 * handlers except the lprocfs init/fini pair.
 *
 * NOTE(review): the closing '#endif' and '};' were dropped by extraction.
 */
1971 static const struct ptlrpc_nrs_pol_ops nrs_trr_ops = {
1972 .op_policy_init = nrs_orr_init,
1973 .op_policy_start = nrs_orr_start,
1974 .op_policy_stop = nrs_orr_stop,
1975 .op_policy_ctl = nrs_orr_ctl,
1976 .op_res_get = nrs_orr_res_get,
1977 .op_res_put = nrs_orr_res_put,
1978 .op_req_get = nrs_orr_req_get,
1979 .op_req_enqueue = nrs_orr_req_add,
1980 .op_req_dequeue = nrs_orr_req_del,
1981 .op_req_stop = nrs_orr_req_stop,
1982 #ifdef CONFIG_PROC_FS
1983 .op_lprocfs_init = nrs_trr_lprocfs_init,
1984 .op_lprocfs_fini = nrs_trr_lprocfs_fini,
/**
 * TRR policy configuration; compatible only with the ost_io service.
 *
 * NOTE(review): the closing '};' was dropped by extraction.
 */
1988 struct ptlrpc_nrs_pol_conf nrs_conf_trr = {
1989 .nc_name = NRS_POL_NAME_TRR,
1990 .nc_ops = &nrs_trr_ops,
1991 .nc_compat = nrs_policy_compat_one,
1992 .nc_compat_svc_name = "ost_io",
1995 /** @} ORR/TRR policy */
1999 #endif /* HAVE_SERVER_SUPPORT */