1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/ptlrpcd.c
37  */
38
39 /** \defgroup ptlrpcd PortalRPC daemon
40  *
41  * ptlrpcd is a special thread with its own set where other users might add
42  * requests when they don't want to wait for their completion.
43  * PtlRPCD will take care of sending such requests and then processing their
44  * replies and calling completion callbacks as necessary.
45  * The callbacks are called directly from ptlrpcd context.
46  * It is important to never significantly block (esp. on RPCs!) within such a
47  * completion handler, or a deadlock might occur where ptlrpcd enters some
48  * callback that attempts to send another RPC and waits for it to return,
49  * during which time ptlrpcd is completely blocked; so e.g. if an import
50  * fails, recovery cannot progress because connection requests are also
51  * sent by ptlrpcd.
52  *
53  * @{
54  */
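/*
 * Illustrative sketch only (my_interpret is hypothetical, and the reply
 * interpreter signature is assumed from this era of the code): a completion
 * callback running in ptlrpcd context should hand any follow-up RPC back to
 * ptlrpcd instead of waiting for it inline:
 *
 *      static int my_interpret(const struct lu_env *env,
 *                              struct ptlrpc_request *req,
 *                              void *args, int rc)
 *      {
 *              // Never call ptlrpc_queue_wait() here: ptlrpcd would block
 *              // inside its own completion path.  Build the follow-up
 *              // request and queue it asynchronously, e.g. via
 *              // ptlrpcd_add_req().
 *              return rc;
 *      }
 */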
55
56 #define DEBUG_SUBSYSTEM S_RPC
57
58 #include <libcfs/libcfs.h>
59 #include <lustre_net.h>
60 #include <lustre_lib.h>
61 #include <lustre_ha.h>
62 #include <obd_class.h>   /* for obd_zombie */
63 #include <obd_support.h> /* for OBD_FAIL_CHECK */
64 #include <cl_object.h> /* cl_env_{get,put}() */
65 #include <lprocfs_status.h>
66
67 #include "ptlrpc_internal.h"
68
69 struct ptlrpcd {
70         int                pd_size;
71         int                pd_index;
72         int                pd_nthreads;
73         struct ptlrpcd_ctl pd_thread_rcv;
74         struct ptlrpcd_ctl pd_threads[0];
75 };
76
77 static int max_ptlrpcds;
78 CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
79                 "Max ptlrpcd thread count to be started.");
80
81 static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
82 CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
83                 "Ptlrpcd threads binding mode.");
84 static struct ptlrpcd *ptlrpcds;
85
86 struct mutex ptlrpcd_mutex;
87 static int ptlrpcd_users = 0;
88
89 void ptlrpcd_wake(struct ptlrpc_request *req)
90 {
91         struct ptlrpc_request_set *rq_set = req->rq_set;
92
93         LASSERT(rq_set != NULL);
94
95         wake_up(&rq_set->set_waitq);
96 }
97 EXPORT_SYMBOL(ptlrpcd_wake);
98
99 static struct ptlrpcd_ctl *
100 ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
101 {
102         int idx = 0;
103
104         if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
105                 return &ptlrpcds->pd_thread_rcv;
106
107         switch (policy) {
108         case PDL_POLICY_SAME:
109                 idx = smp_processor_id() % ptlrpcds->pd_nthreads;
110                 break;
111         case PDL_POLICY_LOCAL:
112                 /* Before the CPU partition patches are available, process it
113                  * the same way as "PDL_POLICY_ROUND". */
114 # ifdef CFS_CPU_MODE_NUMA
115 # warning "fix this code to use new CPU partition APIs"
116 # endif
117                 /* Fall through to PDL_POLICY_ROUND until the CPU
118                  * partition patches are available. */
119                 index = -1;
120         case PDL_POLICY_PREFERRED:
121                 if (index >= 0 && index < num_online_cpus()) {
122                         idx = index % ptlrpcds->pd_nthreads;
123                         break;
124                 }
125                 /* Fall through to PDL_POLICY_ROUND for bad index. */
126         default:
127                 /* Fall through to PDL_POLICY_ROUND for unknown policy. */
128         case PDL_POLICY_ROUND:
129                 /* We do not care whether it is strict load balance. */
130                 idx = ptlrpcds->pd_index + 1;
131                 if (idx == smp_processor_id())
132                         idx++;
133                 idx %= ptlrpcds->pd_nthreads;
134                 ptlrpcds->pd_index = idx;
135                 break;
136         }
137
138         return &ptlrpcds->pd_threads[idx];
139 }
140
141 /**
142  * Move all requests from an existing request set to the ptlrpcd queue.
143  * All requests from the set must be in phase RQ_PHASE_NEW.
144  */
145 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
146 {
147         struct list_head *tmp, *pos;
148         struct ptlrpcd_ctl *pc;
149         struct ptlrpc_request_set *new;
150         int count, i;
151
152         pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
153         new = pc->pc_set;
154
155         list_for_each_safe(pos, tmp, &set->set_requests) {
156                 struct ptlrpc_request *req =
157                         list_entry(pos, struct ptlrpc_request,
158                                    rq_set_chain);
159
160                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
161                 req->rq_set = new;
162                 req->rq_queued_time = cfs_time_current();
163         }
164
165         spin_lock(&new->set_new_req_lock);
166         list_splice_init(&set->set_requests, &new->set_new_requests);
167         i = atomic_read(&set->set_remaining);
168         count = atomic_add_return(i, &new->set_new_count);
169         atomic_set(&set->set_remaining, 0);
170         spin_unlock(&new->set_new_req_lock);
171         if (count == i) {
172                 wake_up(&new->set_waitq);
173
174                 /* XXX: It may be unnecessary to wake up all the partners. But to
175                  *      guarantee that the async RPCs are processed ASAP, we have
176                  *      no better choice. It may be fixed in the future. */
177                 for (i = 0; i < pc->pc_npartners; i++)
178                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
179         }
180 }
181 EXPORT_SYMBOL(ptlrpcd_add_rqset);
182
183 /**
184  * Return the number of transferred RPCs.
185  */
186 static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
187                                struct ptlrpc_request_set *src)
188 {
189         struct list_head *tmp, *pos;
190         struct ptlrpc_request *req;
191         int rc = 0;
192
193         spin_lock(&src->set_new_req_lock);
194         if (likely(!list_empty(&src->set_new_requests))) {
195                 list_for_each_safe(pos, tmp, &src->set_new_requests) {
196                         req = list_entry(pos, struct ptlrpc_request,
197                                          rq_set_chain);
198                         req->rq_set = des;
199                 }
200                 list_splice_init(&src->set_new_requests,
201                                  &des->set_requests);
202                 rc = atomic_read(&src->set_new_count);
203                 atomic_add(rc, &des->set_remaining);
204                 atomic_set(&src->set_new_count, 0);
205         }
206         spin_unlock(&src->set_new_req_lock);
207         return rc;
208 }
209
210 /**
211  * Requests that are added to the ptlrpcd queue are sent via
212  * ptlrpcd_check->ptlrpc_check_set().
213  */
214 void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
215 {
216         struct ptlrpcd_ctl *pc;
217
218         if (req->rq_reqmsg)
219                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
220
221         spin_lock(&req->rq_lock);
222         if (req->rq_invalid_rqset) {
223                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
224                                                      back_to_sleep, NULL);
225
226                 req->rq_invalid_rqset = 0;
227                 spin_unlock(&req->rq_lock);
228                 l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
229         } else if (req->rq_set) {
230                 /* If we have a valid "rq_set", just reuse it to avoid
231                  * double linking. */
232                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
233                 LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
234
235                 /* ptlrpc_check_set will decrease the count */
236                 atomic_inc(&req->rq_set->set_remaining);
237                 spin_unlock(&req->rq_lock);
238                 wake_up(&req->rq_set->set_waitq);
239                 return;
240         } else {
241                 spin_unlock(&req->rq_lock);
242         }
243
244         pc = ptlrpcd_select_pc(req, policy, idx);
245
246         DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
247                   req, pc->pc_name, pc->pc_index);
248
249         ptlrpc_set_add_new_req(pc, req);
250 }
251 EXPORT_SYMBOL(ptlrpcd_add_req);
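/*
 * Minimal usage sketch (illustrative only; my_interpret is a hypothetical
 * callback and the request setup is omitted): a caller that does not want to
 * block hands the request to ptlrpcd instead of calling ptlrpc_queue_wait():
 *
 *      req->rq_interpret_reply = my_interpret;
 *      ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 *
 * The reply is then processed from ptlrpcd context and my_interpret runs as
 * the completion callback described at the top of this file.
 */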
252
253 static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
254 {
255         atomic_inc(&set->set_refcount);
256 }
257
258 /**
259  * Check if there is more work to do on the ptlrpcd set.
260  * Returns 1 if yes.
261  */
262 static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
263 {
264         struct list_head *tmp, *pos;
265         struct ptlrpc_request *req;
266         struct ptlrpc_request_set *set = pc->pc_set;
267         int rc = 0;
268         int rc2;
269         ENTRY;
270
271         if (atomic_read(&set->set_new_count)) {
272                 spin_lock(&set->set_new_req_lock);
273                 if (likely(!list_empty(&set->set_new_requests))) {
274                         list_splice_init(&set->set_new_requests,
275                                              &set->set_requests);
276                         atomic_add(atomic_read(&set->set_new_count),
277                                    &set->set_remaining);
278                         atomic_set(&set->set_new_count, 0);
279                         /*
280                          * Need to calculate its timeout.
281                          */
282                         rc = 1;
283                 }
284                 spin_unlock(&set->set_new_req_lock);
285         }
286
287         /* We should call lu_env_refill() before handling new requests to make
288          * sure that the env keys the requests depend on really exist.
289          */
290         rc2 = lu_env_refill(env);
291         if (rc2 != 0) {
292                 /*
293                  * XXX This is a very awkward situation, because
294                  * execution can neither continue (request
295                  * interpreters assume that env is set up), nor repeat
296                  * the loop (as this potentially results in a tight
297                  * loop of -ENOMEM's).
298                  *
299                  * Fortunately, refill only ever does something when
300                  * new modules are loaded, i.e., early during boot up.
301                  */
302                 CERROR("Failure to refill session: %d\n", rc2);
303                 RETURN(rc);
304         }
305
306         if (atomic_read(&set->set_remaining))
307                 rc |= ptlrpc_check_set(env, set);
308
309         if (!list_empty(&set->set_requests)) {
310                 /*
311                  * XXX: our set never completes, so we prune the completed
312                  * reqs after each iteration. boy could this be smarter.
313                  */
314                 list_for_each_safe(pos, tmp, &set->set_requests) {
315                         req = list_entry(pos, struct ptlrpc_request,
316                                          rq_set_chain);
317                         if (req->rq_phase != RQ_PHASE_COMPLETE)
318                                 continue;
319
320                         list_del_init(&req->rq_set_chain);
321                         req->rq_set = NULL;
322                         ptlrpc_req_finished(req);
323                 }
324         }
325
326         if (rc == 0) {
327                 /*
328                  * If new requests have been added, make sure to wake up.
329                  */
330                 rc = atomic_read(&set->set_new_count);
331
332                 /* If we have nothing to do, check whether we can take some
333                  * work from our partner threads. */
334                 if (rc == 0 && pc->pc_npartners > 0) {
335                         struct ptlrpcd_ctl *partner;
336                         struct ptlrpc_request_set *ps;
337                         int first = pc->pc_cursor;
338
339                         do {
340                                 partner = pc->pc_partners[pc->pc_cursor++];
341                                 if (pc->pc_cursor >= pc->pc_npartners)
342                                         pc->pc_cursor = 0;
343                                 if (partner == NULL)
344                                         continue;
345
346                                 spin_lock(&partner->pc_lock);
347                                 ps = partner->pc_set;
348                                 if (ps == NULL) {
349                                         spin_unlock(&partner->pc_lock);
350                                         continue;
351                                 }
352
353                                 ptlrpc_reqset_get(ps);
354                                 spin_unlock(&partner->pc_lock);
355
356                                 if (atomic_read(&ps->set_new_count)) {
357                                         rc = ptlrpcd_steal_rqset(set, ps);
358                                         if (rc > 0)
359                                                 CDEBUG(D_RPCTRACE, "transfer %d"
360                                                        " async RPCs [%d->%d]\n",
361                                                        rc, partner->pc_index,
362                                                        pc->pc_index);
363                                 }
364                                 ptlrpc_reqset_put(ps);
365                         } while (rc == 0 && pc->pc_cursor != first);
366                 }
367         }
368
369         RETURN(rc);
370 }
371
372 /**
373  * Main ptlrpcd thread.
374  * ptlrpc's code paths like to execute in process context, so we have this
375  * thread, which spins on a set that contains the RPCs and sends them.
376  *
377  */
378 static int ptlrpcd(void *arg)
379 {
380         struct ptlrpcd_ctl *pc = arg;
381         struct ptlrpc_request_set *set = pc->pc_set;
382         struct lu_context ses = { 0 };
383         struct lu_env env = { .le_ses = &ses };
384         int rc, exit = 0;
385         ENTRY;
386
387         unshare_fs_struct();
388 #if defined(CONFIG_SMP)
389         if (test_bit(LIOD_BIND, &pc->pc_flags)) {
390                 int index = pc->pc_index;
391
392                 if (index >= 0 && index < num_possible_cpus()) {
393                         while (!cpu_online(index)) {
394                                 if (++index >= num_possible_cpus())
395                                         index = 0;
396                         }
397                         set_cpus_allowed_ptr(current,
398                                      cpumask_of_node(cpu_to_node(index)));
399                 }
400         }
401 #endif
402         /* Both client and server (MDT/OST) may use the environment. */
403         rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
404                                           LCT_CL_THREAD | LCT_REMEMBER |
405                                           LCT_NOREF);
406         if (rc == 0) {
407                 rc = lu_context_init(env.le_ses,
408                                      LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
409                 if (rc != 0)
410                         lu_context_fini(&env.le_ctx);
411         }
412         complete(&pc->pc_starting);
413
414         if (rc != 0)
415                 RETURN(rc);
416
417         /*
418          * This mainloop strongly resembles ptlrpc_set_wait() except that our
419          * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
420          * there are requests in the set. New requests come in on the set's
421          * new_req_list and ptlrpcd_check() moves them into the set.
422          */
423         do {
424                 struct l_wait_info lwi;
425                 int timeout;
426
427                 timeout = ptlrpc_set_next_timeout(set);
428                 lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
429                                   ptlrpc_expired_set, set);
430
431                 lu_context_enter(&env.le_ctx);
432                 lu_context_enter(env.le_ses);
433                 l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
434                 lu_context_exit(&env.le_ctx);
435                 lu_context_exit(env.le_ses);
436
437                 /*
438          * Abort in-flight RPCs in the forced-stop case.
439                  */
440                 if (test_bit(LIOD_STOP, &pc->pc_flags)) {
441                         if (test_bit(LIOD_FORCE, &pc->pc_flags))
442                                 ptlrpc_abort_set(set);
443                         exit++;
444                 }
445
446                 /*
447                  * Let's make one more loop to make sure that ptlrpcd_check()
448          * copied all racing new RPCs into the set so we can kill them.
449                  */
450         } while (exit < 2);
451
452         /*
453          * Wait for inflight requests to drain.
454          */
455         if (!list_empty(&set->set_requests))
456                 ptlrpc_set_wait(set);
457         lu_context_fini(&env.le_ctx);
458         lu_context_fini(env.le_ses);
459
460         complete(&pc->pc_finishing);
461
462         return 0;
463 }
464
465 /* XXX: We want multiple CPU cores to share the async RPC load. So we start many
466  *      ptlrpcd threads. We also want to reduce the ptlrpcd overhead caused by
467  *      cross-CPU data transfer. So we bind each ptlrpcd thread to a specified
468  *      CPU core. But binding all ptlrpcd threads may cause response delays
469  *      when some CPU core(s) are busy with other loads.
470  *
471  *      For example, during "ls -l" some async RPCs for statahead are assigned to
472  *      ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite busy
473  *      with other non-ptlrpcd work, like "ls -l" itself (we want the "ls -l"
474  *      thread, the statahead thread, and the ptlrpcd thread to run in parallel);
475  *      in such a case the statahead async RPCs cannot be processed in time, which
476  *      is unexpected. If ptlrpcd_0 could be re-scheduled on another CPU core, it
477  *      might be better, but that breaks the former data transfer policy.
478  *
479  *      So we shouldn't blindly avoid the data transfer. We make a
480  *      compromise: divide the ptlrpcd thread pool into two parts. One part is
481  *      for bound mode, each ptlrpcd thread in this part is bound to some CPU
482  *      core. The other part is for free mode; all the ptlrpcd threads in this
483  *      part can be scheduled on any CPU core. We specify some partnership
484  *      between bound mode ptlrpcd thread(s) and free mode ptlrpcd thread(s),
485  *      and the async RPC load is shared among the partners.
486  *
487  *      This partly avoids cross-CPU data transfer (if the bound mode ptlrpcd
488  *      thread can be scheduled in time), and tries to guarantee that async RPCs
489  *      are processed ASAP (as long as a free mode ptlrpcd thread can be scheduled
490  *      on any CPU core).
491  *
492  *      As for how to specify the partnership between bound mode ptlrpcd
493  *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
494  *      a <free, bound> pair. In the future, we can specify a more complex
495  *      partnership based on the patches for CPU partition. But before such
496  *      patches are available, we prefer to use the simplest one.
497  */
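/*
 * For example (an illustrative reading of ptlrpcd_bind() below, not text from
 * the original file): with four ptlrpcd threads under PDB_POLICY_PAIR the
 * partnerships are <ptlrpcd_0, ptlrpcd_1> and <ptlrpcd_2, ptlrpcd_3>; each
 * odd-indexed thread gets LIOD_BIND set and is later restricted by ptlrpcd()
 * to the NUMA node of its CPU index, while its even-indexed partner remains
 * free to run on any CPU core.
 */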
498 # ifdef CFS_CPU_MODE_NUMA
499 # warning "fix ptlrpcd_bind() to use new CPU partition APIs"
500 # endif
501 static int ptlrpcd_bind(int index, int max)
502 {
503         struct ptlrpcd_ctl *pc;
504         int rc = 0;
505 #if defined(CONFIG_NUMA)
506         cpumask_t mask;
507 #endif
508         ENTRY;
509
510         LASSERT(index <= max - 1);
511         pc = &ptlrpcds->pd_threads[index];
512         switch (ptlrpcd_bind_policy) {
513         case PDB_POLICY_NONE:
514                 pc->pc_npartners = -1;
515                 break;
516         case PDB_POLICY_FULL:
517                 pc->pc_npartners = 0;
518                 set_bit(LIOD_BIND, &pc->pc_flags);
519                 break;
520         case PDB_POLICY_PAIR:
521                 LASSERT(max % 2 == 0);
522                 pc->pc_npartners = 1;
523                 break;
524         case PDB_POLICY_NEIGHBOR:
525 #if defined(CONFIG_NUMA)
526         {
527                 int i;
528                 mask = *cpumask_of_node(cpu_to_node(index));
529                 for (i = max; i < num_online_cpus(); i++)
530                         cpu_clear(i, mask);
531                 pc->pc_npartners = cpus_weight(mask) - 1;
532                 set_bit(LIOD_BIND, &pc->pc_flags);
533         }
534 #else
535                 LASSERT(max >= 3);
536                 pc->pc_npartners = 2;
537 #endif
538                 break;
539         default:
540                 CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
541                 rc = -EINVAL;
542         }
543
544         if (rc == 0 && pc->pc_npartners > 0) {
545                 OBD_ALLOC(pc->pc_partners,
546                           sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
547                 if (pc->pc_partners == NULL) {
548                         pc->pc_npartners = 0;
549                         rc = -ENOMEM;
550                 } else {
551                         switch (ptlrpcd_bind_policy) {
552                         case PDB_POLICY_PAIR:
553                                 if (index & 0x1) {
554                                         set_bit(LIOD_BIND, &pc->pc_flags);
555                                         pc->pc_partners[0] = &ptlrpcds->
556                                                 pd_threads[index - 1];
557                                         ptlrpcds->pd_threads[index - 1].
558                                                 pc_partners[0] = pc;
559                                 }
560                                 break;
561                         case PDB_POLICY_NEIGHBOR:
562 #if defined(CONFIG_NUMA)
563                         {
564                                 struct ptlrpcd_ctl *ppc;
565                                 int i, pidx;
566                                 /* partners are cores in the same NUMA node.
567                                  * set up partnerships only with ptlrpcd threads
568                                  * that are already initialized
569                                  */
570                                 for (pidx = 0, i = 0; i < index; i++) {
571                                         if (cpu_isset(i, mask)) {
572                                                 ppc = &ptlrpcds->pd_threads[i];
573                                                 pc->pc_partners[pidx++] = ppc;
574                                                 ppc->pc_partners[ppc->
575                                                           pc_npartners++] = pc;
576                                         }
577                                 }
578                                 /* adjust number of partners to the number
579                                  * of partnerships actually set up */
580                                 pc->pc_npartners = pidx;
581                         }
582 #else
583                                 if (index & 0x1)
584                                         set_bit(LIOD_BIND, &pc->pc_flags);
585                                 if (index > 0) {
586                                         pc->pc_partners[0] = &ptlrpcds->
587                                                 pd_threads[index - 1];
588                                         ptlrpcds->pd_threads[index - 1].
589                                                 pc_partners[1] = pc;
590                                         if (index == max - 1) {
591                                                 pc->pc_partners[1] =
592                                                 &ptlrpcds->pd_threads[0];
593                                                 ptlrpcds->pd_threads[0].
594                                                 pc_partners[0] = pc;
595                                         }
596                                 }
597 #endif
598                                 break;
599                         }
600                 }
601         }
602
603         RETURN(rc);
604 }
605
606
607 int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
608 {
609         int rc;
610         ENTRY;
611
612         /*
613          * Do not allow starting a second thread for one pc.
614          */
615         if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
616                 CWARN("Starting second thread (%s) for same pc %p\n",
617                       name, pc);
618                 RETURN(0);
619         }
620
621         pc->pc_index = index;
622         init_completion(&pc->pc_starting);
623         init_completion(&pc->pc_finishing);
624         spin_lock_init(&pc->pc_lock);
625         strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
626         pc->pc_set = ptlrpc_prep_set();
627         if (pc->pc_set == NULL)
628                 GOTO(out, rc = -ENOMEM);
629
630         /*
631          * So far only "client" ptlrpcd uses an environment. In the future,
632          * a ptlrpcd thread (or a thread-set) has to be given an argument
633          * describing its "scope".
634          */
635         rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
636         if (rc != 0)
637                 GOTO(out_set, rc);
638
639         {
640                 struct task_struct *task;
641                 if (index >= 0) {
642                         rc = ptlrpcd_bind(index, max);
643                         if (rc < 0)
644                                 GOTO(out_env, rc);
645                 }
646
647                 task = kthread_run(ptlrpcd, pc, pc->pc_name);
648                 if (IS_ERR(task))
649                         GOTO(out_env, rc = PTR_ERR(task));
650
651                 wait_for_completion(&pc->pc_starting);
652         }
653         RETURN(0);
654
655 out_env:
656         lu_context_fini(&pc->pc_env.le_ctx);
657
658 out_set:
659         if (pc->pc_set != NULL) {
660                 struct ptlrpc_request_set *set = pc->pc_set;
661
662                 spin_lock(&pc->pc_lock);
663                 pc->pc_set = NULL;
664                 spin_unlock(&pc->pc_lock);
665                 ptlrpc_set_destroy(set);
666         }
667         clear_bit(LIOD_BIND, &pc->pc_flags);
668 out:
669         clear_bit(LIOD_START, &pc->pc_flags);
670         RETURN(rc);
671 }
672
673 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
674 {
675         ENTRY;
676
677         if (!test_bit(LIOD_START, &pc->pc_flags)) {
678                 CWARN("Thread for pc %p was not started\n", pc);
679                 goto out;
680         }
681
682         set_bit(LIOD_STOP, &pc->pc_flags);
683         if (force)
684                 set_bit(LIOD_FORCE, &pc->pc_flags);
685         wake_up(&pc->pc_set->set_waitq);
686
687 out:
688         EXIT;
689 }
690
691 void ptlrpcd_free(struct ptlrpcd_ctl *pc)
692 {
693         struct ptlrpc_request_set *set = pc->pc_set;
694         ENTRY;
695
696         if (!test_bit(LIOD_START, &pc->pc_flags)) {
697                 CWARN("Thread for pc %p was not started\n", pc);
698                 goto out;
699         }
700
701         wait_for_completion(&pc->pc_finishing);
702         lu_context_fini(&pc->pc_env.le_ctx);
703
704         spin_lock(&pc->pc_lock);
705         pc->pc_set = NULL;
706         spin_unlock(&pc->pc_lock);
707         ptlrpc_set_destroy(set);
708
709         clear_bit(LIOD_START, &pc->pc_flags);
710         clear_bit(LIOD_STOP, &pc->pc_flags);
711         clear_bit(LIOD_FORCE, &pc->pc_flags);
712         clear_bit(LIOD_BIND, &pc->pc_flags);
713
714 out:
715         if (pc->pc_npartners > 0) {
716                 LASSERT(pc->pc_partners != NULL);
717
718                 OBD_FREE(pc->pc_partners,
719                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
720                 pc->pc_partners = NULL;
721         }
722         pc->pc_npartners = 0;
723         EXIT;
724 }
725
726 static void ptlrpcd_fini(void)
727 {
728         int i;
729         ENTRY;
730
731         if (ptlrpcds != NULL) {
732                 for (i = 0; i < ptlrpcds->pd_nthreads; i++)
733                         ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
734                 for (i = 0; i < ptlrpcds->pd_nthreads; i++)
735                         ptlrpcd_free(&ptlrpcds->pd_threads[i]);
736                 ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
737                 ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
738                 OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
739                 ptlrpcds = NULL;
740         }
741
742         EXIT;
743 }
744
745 static int ptlrpcd_init(void)
746 {
747         int     nthreads = num_online_cpus();
748         char    name[16];
749         int     size, i = -1, j, rc = 0;
750         ENTRY;
751
752         if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
753                 nthreads = max_ptlrpcds;
754         if (nthreads < 2)
755                 nthreads = 2;
756         if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
757                 ptlrpcd_bind_policy = PDB_POLICY_PAIR;
758         else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
759                 nthreads &= ~1; /* make sure it is even */
760
761         size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
762         OBD_ALLOC(ptlrpcds, size);
763         if (ptlrpcds == NULL)
764                 GOTO(out, rc = -ENOMEM);
765
766         snprintf(name, 15, "ptlrpcd_rcv");
767         set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
768         rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
769         if (rc < 0)
770                 GOTO(out, rc);
771
772         /* XXX: We start nthreads ptlrpc daemons. Each of them can process any
773          *      non-recovery async RPC to improve overall async RPC efficiency.
774          *
775          *      But there are some issues when async I/O RPCs and async non-I/O
776          *      RPCs are processed in the same set in some cases. The ptlrpcd
777          *      thread may be blocked by some async I/O RPC(s), which then causes
778          *      other async non-I/O RPC(s) not to be processed in time.
779          *
780          *      Maybe we should distinguish blocked async RPCs from non-blocked
781          *      async RPCs, and process them in different ptlrpcd sets to avoid
782          *      unnecessary dependencies. But how to distribute the async RPC load
783          *      among all the ptlrpc daemons becomes another problem. */
784         for (i = 0; i < nthreads; i++) {
785                 snprintf(name, 15, "ptlrpcd_%d", i);
786                 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
787                 if (rc < 0)
788                         GOTO(out, rc);
789         }
790
791         ptlrpcds->pd_size = size;
792         ptlrpcds->pd_index = 0;
793         ptlrpcds->pd_nthreads = nthreads;
794
795 out:
796         if (rc != 0 && ptlrpcds != NULL) {
797                 for (j = 0; j <= i; j++)
798                         ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
799                 for (j = 0; j <= i; j++)
800                         ptlrpcd_free(&ptlrpcds->pd_threads[j]);
801                 ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
802                 ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
803                 OBD_FREE(ptlrpcds, size);
804                 ptlrpcds = NULL;
805         }
806
807         RETURN(rc);
808 }
809
810 int ptlrpcd_addref(void)
811 {
812         int rc = 0;
813         ENTRY;
814
815         mutex_lock(&ptlrpcd_mutex);
816         if (++ptlrpcd_users == 1) {
817                 rc = ptlrpcd_init();
818                 if (rc < 0)
819                         ptlrpcd_users--;
820         }
821         mutex_unlock(&ptlrpcd_mutex);
822         RETURN(rc);
823 }
824 EXPORT_SYMBOL(ptlrpcd_addref);
825
826 void ptlrpcd_decref(void)
827 {
828         mutex_lock(&ptlrpcd_mutex);
829         if (--ptlrpcd_users == 0)
830                 ptlrpcd_fini();
831         mutex_unlock(&ptlrpcd_mutex);
832 }
833 EXPORT_SYMBOL(ptlrpcd_decref);
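/*
 * Minimal usage sketch (illustrative only; the calling site is hypothetical):
 * users of ptlrpcd pair these calls so the thread pool is created for the
 * first user and torn down with the last one:
 *
 *      rc = ptlrpcd_addref();          // first caller triggers ptlrpcd_init()
 *      if (rc == 0) {
 *              ... submit work with ptlrpcd_add_req() ...
 *              ptlrpcd_decref();       // last caller triggers ptlrpcd_fini()
 *      }
 */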
834 /** @} ptlrpcd */