/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set, to which other
 * users may add requests when they do not want to wait for their
 * completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!)
 * within such a completion handler, or a deadlock might occur: ptlrpcd
 * enters a callback that attempts to send another RPC and waits for it
 * to return, and during that time ptlrpcd is completely blocked. So,
 * for example, if an import fails, recovery cannot progress because
 * connection requests are also sent by ptlrpcd.
 *
 * @{
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"

/* One of these per CPT. */
struct ptlrpcd {
        int                     pd_size;
        int                     pd_index;
        int                     pd_cpt;
        int                     pd_cursor;
        int                     pd_nthreads;
        int                     pd_groupsize;
        struct ptlrpcd_ctl      pd_threads[];
};

/*
 * max_ptlrpcds is obsolete, but retained to ensure that the kernel
 * module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_per_cpt_max.
 */
static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds,
                 "Max ptlrpcd thread count to be started (obsolete).");

/*
 * ptlrpcd_bind_policy is obsolete, but retained to ensure that
 * the kernel module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_partner_group_size.
 */
static int ptlrpcd_bind_policy;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy,
                 "Ptlrpcd threads binding mode (obsolete).");

/*
 * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
 * in a CPT.
 */
static int ptlrpcd_per_cpt_max;
module_param(ptlrpcd_per_cpt_max, int, 0644);
MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
                 "Max ptlrpcd thread count to be started per CPT.");

/*
 * ptlrpcd_partner_group_size: The desired number of threads in each
 * ptlrpcd partner thread group. Default is 2, corresponding to the
 * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
 * a CPT partners of each other.
 */
static int ptlrpcd_partner_group_size;
module_param(ptlrpcd_partner_group_size, int, 0644);
MODULE_PARM_DESC(ptlrpcd_partner_group_size,
                 "Number of ptlrpcd threads in a partner group.");

/*
 * ptlrpcd_cpts: A CPT string describing the CPU partitions that
 * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
 * a subset of all CPTs.
 *
 * ptlrpcd_cpts=2
 * ptlrpcd_cpts=[2]
 *   run ptlrpcd threads only on CPT 2.
 *
 * ptlrpcd_cpts=0-3
 * ptlrpcd_cpts=[0-3]
 *   run ptlrpcd threads on CPTs 0, 1, 2, and 3.
 *
 * ptlrpcd_cpts=[0-3,5,7]
 *   run ptlrpcd threads on CPTs 0, 1, 2, 3, 5, and 7.
 */
static char *ptlrpcd_cpts;
module_param(ptlrpcd_cpts, charp, 0644);
MODULE_PARM_DESC(ptlrpcd_cpts,
                 "CPU partitions ptlrpcd threads should run in");

/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int              *ptlrpcds_cpt_idx;

/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
static int              ptlrpcds_num;
static struct ptlrpcd   **ptlrpcds;

/*
 * In addition to the regular thread pool above, there is a single
 * global recovery thread. Recovery isn't critical for performance,
 * and doesn't block, but must always be able to proceed, and it is
 * possible that all normal ptlrpcd threads are blocked. Hence the
 * need for a dedicated thread.
 */
static struct ptlrpcd_ctl ptlrpcd_rcv;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;

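/**
 * Wake up the thread that is processing the set \a req belongs to, so
 * that it notices the request without waiting for a timeout.
 */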
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = req->rq_set;

        LASSERT(set != NULL);
        wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

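/**
 * Select the ptlrpcd thread that should handle \a req: requests for an
 * import that is not FULL go to the dedicated recovery thread, while
 * all others are spread round-robin over the threads of the current CPT.
 */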
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req)
{
        struct ptlrpcd  *pd;
        int             cpt;
        int             idx;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcd_rcv;

        cpt = cfs_cpt_current(cfs_cpt_tab, 1);
        if (ptlrpcds_cpt_idx == NULL)
                idx = cpt;
        else
                idx = ptlrpcds_cpt_idx[cpt];
        pd = ptlrpcds[idx];

        /* Strict load balancing is not required here. */
        idx = pd->pd_cursor;
        if (++idx == pd->pd_nthreads)
                idx = 0;
        pd->pd_cursor = idx;

        return &pd->pd_threads[idx];
}

/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests in the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *pos;
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL);
        new = pc->pc_set;

        list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(pos, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                req->rq_set = new;
                req->rq_queued_time = ktime_get_seconds();
        }

        spin_lock(&new->set_new_req_lock);
        list_splice_init(&set->set_requests, &new->set_new_requests);
        i = atomic_read(&set->set_remaining);
        count = atomic_add_return(i, &new->set_new_count);
        atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                wake_up(&new->set_waitq);

                /*
                 * XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee the async RPC can be processed
                 *      ASAP, we have no better choice. This may be fixed
                 *      in the future.
                 */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}

/**
 * Steal all new requests from \a src and add them to \a des.
 * Return the number of transferred RPCs.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        struct ptlrpc_request *req;
        int rc = 0;

        spin_lock(&src->set_new_req_lock);
        if (likely(!list_empty(&src->set_new_requests))) {
                list_for_each_entry(req, &src->set_new_requests, rq_set_chain)
                        req->rq_set = des;

                list_splice_init(&src->set_new_requests,
                                 &des->set_requests);
                rc = atomic_read(&src->set_new_count);
                atomic_add(rc, &des->set_remaining);
                atomic_set(&src->set_new_count, 0);
        }
        spin_unlock(&src->set_new_req_lock);
        return rc;
}

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);
                if (wait_event_idle_timeout(req->rq_set_waitq,
                                            req->rq_set == NULL,
                                            cfs_time_seconds(5)) == 0)
                        l_wait_event_abortable(req->rq_set_waitq,
                                               req->rq_set == NULL);
        } else if (req->rq_set) {
                /*
                 * If we have a valid "rq_set", just reuse it to avoid
                 * double linking.
                 */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
                wake_up(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s+%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);

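/* Grab a reference on \a set so a partner thread's set can be examined
 * safely after its pc_lock has been dropped. */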
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request *req, *tmp;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;

        ENTRY;

        if (atomic_read(&set->set_new_count)) {
                spin_lock(&set->set_new_req_lock);
                if (likely(!list_empty(&set->set_new_requests))) {
                        list_splice_init(&set->set_new_requests,
                                         &set->set_requests);
                        atomic_add(atomic_read(&set->set_new_count),
                                   &set->set_remaining);
                        atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                spin_unlock(&set->set_new_req_lock);
        }

        /*
         * We should call lu_env_refill() before handling new requests to
         * make sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        /*
         * NB: ptlrpc_check_set() has already moved completed requests to
         * the head of set::set_requests.
         */
        list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) {
                if (req->rq_phase != RQ_PHASE_COMPLETE)
                        break;

                list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = atomic_read(&set->set_new_count);

                /*
                 * If we have nothing to do, check whether we can take some
                 * work from our partner threads.
                 */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                spin_unlock(&partner->pc_lock);

                                if (atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE,
                                                       "transfer %d async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
        }

        RETURN(rc || test_bit(LIOD_STOP, &pc->pc_flags));
}

/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl              *pc = arg;
        struct ptlrpc_request_set       *set;
        struct lu_context               ses = { 0 };
        struct lu_env                   env = { .le_ses = &ses };
        int                             rc = 0;
        int                             exit = 0;

        ENTRY;
        if (cfs_cpt_bind(cfs_cpt_tab, pc->pc_cpt) != 0)
                CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);

        /*
         * Allocate the request set after the thread has been bound
         * above. This is safe because no requests will be queued
         * until all ptlrpcd threads have confirmed that they have
         * successfully started.
         */
        set = ptlrpc_prep_set();
        if (set == NULL)
                GOTO(failed, rc = -ENOMEM);
        spin_lock(&pc->pc_lock);
        pc->pc_set = set;
        spin_unlock(&pc->pc_lock);

        /* Both client and server (MDT/OST) may use the environment. */
        rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD |
                                          LCT_DT_THREAD |
                                          LCT_CL_THREAD |
                                          LCT_REMEMBER  |
                                          LCT_NOREF);
        if (rc != 0)
                GOTO(failed, rc);
        rc = lu_context_init(env.le_ses, LCT_SESSION  |
                                         LCT_REMEMBER |
                                         LCT_NOREF);
        if (rc != 0) {
                lu_context_fini(&env.le_ctx);
                GOTO(failed, rc);
        }

        complete(&pc->pc_starting);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                time64_t timeout;

                timeout = ptlrpc_set_next_timeout(set);

                lu_context_enter(&env.le_ctx);
                lu_context_enter(env.le_ses);
                if (timeout == 0)
                        wait_event_idle(set->set_waitq,
                                        ptlrpcd_check(&env, pc));
                else if (wait_event_idle_timeout(set->set_waitq,
                                                 ptlrpcd_check(&env, pc),
                                                 cfs_time_seconds(timeout))
                         == 0)
                        ptlrpc_expired_set(set);
                lu_context_exit(&env.le_ctx);
                lu_context_exit(env.le_ses);

                /*
                 * Abort inflight RPCs for the forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new RPCs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&set->set_requests))
                ptlrpc_set_wait(&env, set);
        lu_context_fini(&env.le_ctx);
        lu_context_fini(env.le_ses);

        complete(&pc->pc_finishing);

        return 0;

failed:
        pc->pc_error = rc;
        complete(&pc->pc_starting);
        RETURN(rc);
}

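/**
 * Initialize the common fields of a ptlrpcd_ctl: index, CPT, completions,
 * lock, and thread name. A negative \a index denotes the recovery thread.
 */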
static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
{
        ENTRY;

        pc->pc_index = index;
        pc->pc_cpt = cpt;
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);

        if (index < 0) {
                /* Recovery thread. */
                snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
        } else {
                /* Regular thread. */
                snprintf(pc->pc_name, sizeof(pc->pc_name),
                         "ptlrpcd_%02d_%02d", cpt, index);
        }

        EXIT;
}

/* XXX: We want multiple CPU cores to share the async RPC load. So we
 *      start many ptlrpcd threads. We also want to reduce the ptlrpcd
 *      overhead caused by data transfer across CPU cores. So we bind
 *      all ptlrpcd threads to a CPT, in the expectation that CPTs
 *      will be defined in a way that matches these boundaries. Within
 *      a CPT a ptlrpcd thread can be scheduled on any available core.
 *
 *      Each ptlrpcd thread has its own request queue. This can cause
 *      response delay if the thread is already busy. To help with
 *      this we define partner threads: these are other threads bound
 *      to the same CPT which will check for work in each other's
 *      request queues if they have no work to do.
 *
 *      The desired number of partner threads can be tuned by setting
 *      ptlrpcd_partner_group_size. The default is to create pairs of
 *      partner threads.
 */
static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
{
        struct ptlrpcd_ctl      *pc;
        struct ptlrpcd_ctl      **ppc;
        int                     first;
        int                     i;
        int                     rc = 0;

        ENTRY;

        LASSERT(index >= 0 && index < pd->pd_nthreads);
        pc = &pd->pd_threads[index];
        pc->pc_npartners = pd->pd_groupsize - 1;

        if (pc->pc_npartners <= 0)
                GOTO(out, rc);

        OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_tab, pc->pc_cpt,
                      sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
        if (pc->pc_partners == NULL) {
                pc->pc_npartners = 0;
                GOTO(out, rc = -ENOMEM);
        }

        first = index - index % pd->pd_groupsize;
        ppc = pc->pc_partners;
        for (i = first; i < first + pd->pd_groupsize; i++) {
                if (i != index)
                        *ppc++ = &pd->pd_threads[i];
        }
out:
        RETURN(rc);
}

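/**
 * Start the ptlrpcd thread described by \a pc and wait until it has
 * either set up its request set or failed. On failure any allocated set
 * is destroyed and the error reported by the thread is returned.
 */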
int ptlrpcd_start(struct ptlrpcd_ctl *pc)
{
        struct task_struct      *task;
        int                     rc = 0;

        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      pc->pc_name, pc);
                RETURN(0);
        }

        task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
        if (IS_ERR(task))
                GOTO(out_set, rc = PTR_ERR(task));

        wait_for_completion(&pc->pc_starting);
        rc = pc->pc_error;
        if (rc != 0)
                GOTO(out_set, rc);

        RETURN(0);

out_set:
        if (pc->pc_set != NULL) {
                struct ptlrpc_request_set *set = pc->pc_set;

                spin_lock(&pc->pc_lock);
                pc->pc_set = NULL;
                spin_unlock(&pc->pc_lock);
                ptlrpc_set_destroy(set);
        }
        clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}

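/**
 * Ask the thread for \a pc to stop; with \a force set, inflight RPCs are
 * aborted rather than drained. The caller must still call ptlrpcd_free()
 * to wait for the thread to exit and to release its resources.
 */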
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        wake_up(&pc->pc_set->set_waitq);

out:
        EXIT;
}

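/**
 * Wait for the thread for \a pc to finish, then destroy its request set,
 * reset its flags, and free its partner array.
 */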
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request_set *set = pc->pc_set;

        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        wait_for_completion(&pc->pc_finishing);

        spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);

out:
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE_PTR_ARRAY(pc->pc_partners, pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
        pc->pc_error = 0;
        EXIT;
}

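/**
 * Stop and free all ptlrpcd threads, including the recovery thread, and
 * release the per-CPT arrays. Called when the last user drops its
 * reference.
 */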
static void ptlrpcd_fini(void)
{
        int     i;
        int     j;
        int     ncpts;

        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds_num; i++) {
                        if (ptlrpcds[i] == NULL)
                                break;
                        for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
                                ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
                        for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
                                ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
                        OBD_FREE(ptlrpcds[i], ptlrpcds[i]->pd_size);
                        ptlrpcds[i] = NULL;
                }
                OBD_FREE_PTR_ARRAY(ptlrpcds, ptlrpcds_num);
        }
        ptlrpcds_num = 0;

        ptlrpcd_stop(&ptlrpcd_rcv, 0);
        ptlrpcd_free(&ptlrpcd_rcv);

        if (ptlrpcds_cpt_idx != NULL) {
                ncpts = cfs_cpt_number(cfs_cpt_tab);
                OBD_FREE_PTR_ARRAY(ptlrpcds_cpt_idx, ncpts);
                ptlrpcds_cpt_idx = NULL;
        }

        EXIT;
}

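/**
 * Create the per-CPT ptlrpcd thread pools and the recovery thread,
 * honoring the module parameters (including the obsolete ones) that
 * select the CPTs, thread counts, and partner group sizes.
 */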
static int ptlrpcd_init(void)
{
        int                     nthreads;
        int                     groupsize;
        int                     size;
        int                     i;
        int                     j;
        int                     rc = 0;
        struct cfs_cpt_table    *cptable;
        __u32                   *cpts = NULL;
        int                     ncpts;
        int                     cpt;
        struct ptlrpcd          *pd;

        ENTRY;

        /*
         * Determine the CPTs that ptlrpcd threads will run on.
         */
        cptable = cfs_cpt_tab;
        ncpts = cfs_cpt_number(cptable);
        if (ptlrpcd_cpts != NULL) {
                struct cfs_expr_list *el;

                size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
                OBD_ALLOC(ptlrpcds_cpt_idx, size);
                if (ptlrpcds_cpt_idx == NULL)
                        GOTO(out, rc = -ENOMEM);

                rc = cfs_expr_list_parse(ptlrpcd_cpts,
                                         strlen(ptlrpcd_cpts),
                                         0, ncpts - 1, &el);
                if (rc != 0) {
                        CERROR("%s: invalid CPT pattern string: %s\n",
                               "ptlrpcd_cpts", ptlrpcd_cpts);
                        GOTO(out, rc = -EINVAL);
                }

                rc = cfs_expr_list_values(el, ncpts, &cpts);
                cfs_expr_list_free(el);
                if (rc <= 0) {
                        CERROR("%s: failed to parse CPT array %s: %d\n",
                               "ptlrpcd_cpts", ptlrpcd_cpts, rc);
                        if (rc == 0)
                                rc = -EINVAL;
                        GOTO(out, rc);
                }

                /*
                 * Create the cpt-to-index map. When there is no match
                 * in the cpt table, pick a cpt at random. This could
                 * be changed to take the topology of the system into
                 * account.
                 */
                for (cpt = 0; cpt < ncpts; cpt++) {
                        for (i = 0; i < rc; i++)
                                if (cpts[i] == cpt)
                                        break;
                        if (i >= rc)
                                i = cpt % rc;
                        ptlrpcds_cpt_idx[cpt] = i;
                }

                cfs_expr_list_values_free(cpts, rc);
                ncpts = rc;
        }
        ptlrpcds_num = ncpts;

        size = ncpts * sizeof(ptlrpcds[0]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        /*
         * The max_ptlrpcds parameter is obsolete, but do something
         * sane if it has been tuned, and complain if
         * ptlrpcd_per_cpt_max has also been tuned.
         */
        if (max_ptlrpcds != 0) {
                CWARN("max_ptlrpcds is obsolete.\n");
                if (ptlrpcd_per_cpt_max == 0) {
                        ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
                        /* Round up if there is a remainder. */
                        if (max_ptlrpcds % ncpts != 0)
                                ptlrpcd_per_cpt_max++;
                        CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
                              ptlrpcd_per_cpt_max);
                } else {
                        CWARN("ptlrpcd_per_cpt_max is also set!\n");
                }
        }

        /*
         * The ptlrpcd_bind_policy parameter is obsolete, but do
         * something sane if it has been tuned, and complain if
         * ptlrpcd_partner_group_size is also tuned.
         */
        if (ptlrpcd_bind_policy != 0) {
                CWARN("ptlrpcd_bind_policy is obsolete.\n");
                if (ptlrpcd_partner_group_size == 0) {
                        switch (ptlrpcd_bind_policy) {
                        case 1: /* PDB_POLICY_NONE */
                        case 2: /* PDB_POLICY_FULL */
                                ptlrpcd_partner_group_size = 1;
                                break;
                        case 3: /* PDB_POLICY_PAIR */
                                ptlrpcd_partner_group_size = 2;
                                break;
                        case 4: /* PDB_POLICY_NEIGHBOR */
#ifdef CONFIG_NUMA
                                ptlrpcd_partner_group_size = -1; /* CPT */
#else
                                ptlrpcd_partner_group_size = 3; /* Triplets */
#endif
                                break;
                        default: /* Illegal value, use the default. */
                                ptlrpcd_partner_group_size = 2;
                                break;
                        }
                        CWARN("Setting ptlrpcd_partner_group_size = %d\n",
                              ptlrpcd_partner_group_size);
                } else {
                        CWARN("ptlrpcd_partner_group_size is also set!\n");
                }
        }

        if (ptlrpcd_partner_group_size == 0)
                ptlrpcd_partner_group_size = 2;
        else if (ptlrpcd_partner_group_size < 0)
                ptlrpcd_partner_group_size = -1;
        else if (ptlrpcd_per_cpt_max > 0 &&
                 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
                ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;

        /*
         * Start the recovery thread first.
         */
        set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
        ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
        rc = ptlrpcd_start(&ptlrpcd_rcv);
        if (rc < 0)
                GOTO(out, rc);

        for (i = 0; i < ncpts; i++) {
                if (cpts == NULL)
                        cpt = i;
                else
                        cpt = cpts[i];

                nthreads = cfs_cpt_weight(cptable, cpt);
                if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
                        nthreads = ptlrpcd_per_cpt_max;
                if (nthreads < 2)
                        nthreads = 2;

                if (ptlrpcd_partner_group_size <= 0) {
                        groupsize = nthreads;
                } else if (nthreads <= ptlrpcd_partner_group_size) {
                        groupsize = nthreads;
                } else {
                        groupsize = ptlrpcd_partner_group_size;
                        if (nthreads % groupsize != 0)
                                nthreads += groupsize - (nthreads % groupsize);
                }

                size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
                OBD_CPT_ALLOC(pd, cptable, cpt, size);

                if (!pd)
                        GOTO(out, rc = -ENOMEM);
                pd->pd_size      = size;
                pd->pd_index     = i;
                pd->pd_cpt       = cpt;
                pd->pd_cursor    = 0;
                pd->pd_nthreads  = nthreads;
                pd->pd_groupsize = groupsize;
                ptlrpcds[i] = pd;

                /*
                 * The ptlrpcd threads in a partner group can access
                 * each other's struct ptlrpcd_ctl, so these must be
                 * initialized before any thread is started.
                 */
                for (j = 0; j < nthreads; j++) {
                        ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
                        rc = ptlrpcd_partners(pd, j);
                        if (rc < 0)
                                GOTO(out, rc);
                }

                /* XXX: We start nthreads ptlrpc daemons on this cpt.
                 *      Each of them can process any non-recovery
                 *      async RPC to improve overall async RPC
                 *      efficiency.
                 *
                 *      But there are some issues when async I/O RPCs
                 *      and async non-I/O RPCs are processed in the
                 *      same set. The ptlrpcd may be blocked by some
                 *      async I/O RPC(s), and then other async non-I/O
                 *      RPC(s) cannot be processed in time.
                 *
                 *      Maybe we should distinguish blocking async RPCs
                 *      from non-blocking async RPCs, and process them
                 *      in different ptlrpcd sets to avoid unnecessary
                 *      dependency. But how to distribute the async RPC
                 *      load among all the ptlrpc daemons then becomes
                 *      another problem.
                 */
                for (j = 0; j < nthreads; j++) {
                        rc = ptlrpcd_start(&pd->pd_threads[j]);
                        if (rc < 0)
                                GOTO(out, rc);
                }
        }
out:
        if (rc != 0)
                ptlrpcd_fini();

        RETURN(rc);
}

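/**
 * Take a reference on the ptlrpcd service; the first caller triggers
 * creation of all ptlrpcd threads via ptlrpcd_init().
 */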
int ptlrpcd_addref(void)
{
        int rc = 0;

        ENTRY;

        mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1) {
                rc = ptlrpcd_init();
                if (rc < 0)
                        ptlrpcd_users--;
        }
        mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

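/**
 * Drop a reference on the ptlrpcd service; the last caller tears down
 * all ptlrpcd threads via ptlrpcd_fini().
 */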
void ptlrpcd_decref(void)
{
        mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);

/** @} ptlrpcd */