/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set where other users
 * may add requests when they do not want to wait for their completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never block significantly (especially on RPCs!) within
 * such a completion handler, or a deadlock might occur where ptlrpcd enters
 * a callback that sends another RPC and waits for it to return, during
 * which time ptlrpcd is completely blocked; e.g. if an import fails,
 * recovery cannot progress because connection requests are also sent by
 * ptlrpcd.
 *
 * @{
 */
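/*
 * A minimal usage sketch (assumed caller code, not part of this file):
 * the request is handed off to ptlrpcd and its reply is handled by the
 * interpret callback from ptlrpcd context. "my_interpret" is a
 * hypothetical callback name.
 *
 *      rc = ptlrpcd_addref();                  // pin the ptlrpcd threads
 *      if (rc == 0) {
 *              req->rq_interpret_reply = my_interpret;
 *              ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 *              ...
 *              ptlrpcd_decref();               // release when done
 *      }
 */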

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"

struct ptlrpcd {
        int                pd_size;       /* bytes allocated for this struct */
        int                pd_index;      /* cursor for round-robin selection */
        int                pd_nthreads;   /* number of entries in pd_threads[] */
        struct ptlrpcd_ctl pd_thread_rcv; /* recovery thread */
        struct ptlrpcd_ctl pd_threads[0]; /* worker threads */
};

#ifdef __KERNEL__
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");
#endif
static struct ptlrpcd *ptlrpcds;

cfs_semaphore_t ptlrpcd_sem;
static int ptlrpcd_users = 0;

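/**
 * Wake up the thread processing the set that \a req has been queued on.
 * The request must already be attached to a request set.
 */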
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}

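/**
 * Choose the ptlrpcd thread that should handle \a req. Requests that are
 * not in LUSTRE_IMP_FULL state always go to the recovery thread; others
 * are spread over the thread pool according to \a policy and the
 * caller-supplied \a index hint.
 */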
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

#ifdef __KERNEL__
        switch (policy) {
        case PDL_POLICY_SAME:
                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Until the CPU partition patches are available, process it
                 * the same way as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < cfs_num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for a bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for an unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == cfs_smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }
#endif /* __KERNEL__ */

        return &ptlrpcds->pd_threads[idx];
}


/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests in the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        cfs_list_t *tmp, *pos;
#ifdef __KERNEL__
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;
#endif

        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(pos, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
#else
                cfs_list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                cfs_atomic_dec(&set->set_remaining);
#endif
        }

#ifdef __KERNEL__
        cfs_spin_lock(&new->set_new_req_lock);
        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
        i = cfs_atomic_read(&set->set_remaining);
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        cfs_spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                cfs_waitq_signal(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee that the async RPCs are processed
                 *      ASAP, we have no better choice. This may be fixed
                 *      in the future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
        }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);

#ifdef __KERNEL__
/**
 * Return the number of RPCs transferred from \a src to \a des.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        cfs_spin_lock(&src->set_new_req_lock);
        if (likely(!cfs_list_empty(&src->set_new_requests))) {
                cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        req->rq_set = des;
                }
                cfs_list_splice_init(&src->set_new_requests,
                                     &des->set_requests);
                rc = cfs_atomic_read(&src->set_new_count);
                cfs_atomic_add(rc, &des->set_remaining);
                cfs_atomic_set(&src->set_new_count, 0);
        }
        cfs_spin_unlock(&src->set_new_req_lock);
        return rc;
}
#endif

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        cfs_spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                cfs_spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
                 * linking. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                cfs_spin_unlock(&req->rq_lock);
                cfs_waitq_signal(&req->rq_set->set_waitq);
                return;
        } else {
                cfs_spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        cfs_atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on the ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (cfs_atomic_read(&set->set_new_count)) {
                cfs_spin_lock(&set->set_new_req_lock);
                if (likely(!cfs_list_empty(&set->set_new_requests))) {
                        cfs_list_splice_init(&set->set_new_requests,
                                             &set->set_requests);
                        cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
                                       &set->set_remaining);
                        cfs_atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                cfs_spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to
         * make sure that the env key the requests depend on really exists.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (cfs_atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!cfs_list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = cfs_atomic_read(&set->set_new_count);

#ifdef __KERNEL__
                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                cfs_spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        cfs_spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                cfs_spin_unlock(&partner->pc_lock);

                                if (cfs_atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
#endif
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        cfs_daemonize_ctxt(pc->pc_name);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
        if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < cfs_num_possible_cpus()) {
                        while (!cfs_cpu_online(index)) {
                                if (++index >= cfs_num_possible_cpus())
                                        index = 0;
                        }
                        cfs_set_cpus_allowed(cfs_current(),
                                     node_to_cpumask(cpu_to_node(index)));
                }
        }
#endif
        /*
         * XXX So far only the "client" ptlrpcd uses an environment. In
         * the future, the ptlrpcd thread (or a thread-set) has to be
         * given an argument describing its "scope".
         */
        rc = lu_context_init(&env.le_ctx,
                             LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        cfs_complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight RPCs for the forced stop case.
                 */
                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new RPCs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        cfs_complete(&pc->pc_finishing);

        cfs_clear_bit(LIOD_START, &pc->pc_flags);
        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
        return 0;
}

/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind each ptlrpcd
 *      thread to a specific CPU core. But binding all ptlrpcd threads may
 *      cause response delays when some CPU core(s) are busy with other
 *      loads.
 *
 *      For example: during "ls -l", some async RPCs for statahead are
 *      assigned to ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0
 *      may be quite busy with non-ptlrpcd work, like "ls -l" itself (we
 *      want the "ls -l" thread, the statahead thread, and the ptlrpcd
 *      thread to run in parallel). In such a case the statahead async RPCs
 *      cannot be processed in time, which is unexpected. If ptlrpcd_0
 *      could be re-scheduled on another CPU core it might be better, but
 *      that breaks the data transfer policy above.
 *
 *      So we should not blindly avoid the data transfer. Instead we make a
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      is for bound mode, where each ptlrpcd thread is bound to some CPU
 *      core; the other part is for free mode, where the ptlrpcd threads
 *      can be scheduled on any CPU core. We specify a partnership between
 *      bound-mode ptlrpcd thread(s) and free-mode ptlrpcd thread(s), and
 *      the async RPC load within the partners is shared.
 *
 *      This partly avoids cross-CPU data transfer (as long as the
 *      bound-mode ptlrpcd thread can be scheduled in time), and tries to
 *      guarantee that the async RPCs are processed ASAP (since the
 *      free-mode ptlrpcd threads can be scheduled on any CPU core).
 *
 *      As for how to specify the partnership between bound-mode ptlrpcd
 *      thread(s) and free-mode ptlrpcd thread(s), the simplest way is to
 *      use a <free, bound> pair, as shown in the example below. In the
 *      future we can specify a more complex partnership based on the
 *      patches for CPU partitions, but until such patches are available we
 *      prefer the simplest one.
 */
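/*
 * For example (derived from the pairing code in ptlrpcd_bind() below),
 * with 4 ptlrpcd threads under PDB_POLICY_PAIR the partnership is:
 *
 *      <ptlrpcd_0 free, ptlrpcd_1 bound> <ptlrpcd_2 free, ptlrpcd_3 bound>
 *
 * i.e. odd-indexed threads are CPU-bound and each shares its async RPC
 * load with the preceding even-indexed (free) thread.
 */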
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
                LASSERT(max >= 3);
                pc->pc_npartners = 2;
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        if (index & 0x1)
                                cfs_set_bit(LIOD_BIND, &pc->pc_flags);

                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                pc_partners[0] = pc;
                                        }
                                }
                                break;
                        }
                }
        }

        RETURN(rc);
}

#else /* !__KERNEL__ */

/**
 * In liblustre we do not have separate threads, so this function
 * is called from time to time all across common code to see
 * if something needs to be processed on the ptlrpcd set.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                }
        }

        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif

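/**
 * Prepare the ptlrpcd control structure \a pc and start its thread (in the
 * kernel) or register its callbacks (in liblustre). \a index is the thread
 * index used for CPU binding, or -1 for the recovery thread; \a max is the
 * total number of threads in the pool.
 */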
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        int env = 0;
        ENTRY;

        /*
         * Do not allow starting a second thread for the same pc.
         */
        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        cfs_init_completion(&pc->pc_starting);
        cfs_init_completion(&pc->pc_finishing);
        cfs_spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only the "client" ptlrpcd uses an environment. In the
         * future, the ptlrpcd thread (or a thread-set) has to be given an
         * argument describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

        env = 1;
#ifdef __KERNEL__
        if (index >= 0) {
                rc = ptlrpcd_bind(index, max);
                if (rc < 0)
                        GOTO(out, rc);
        }

        rc = cfs_create_thread(ptlrpcd, pc, 0);
        if (rc < 0)
                GOTO(out, rc);

        rc = 0;
        cfs_wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc) {
#ifdef __KERNEL__
                if (pc->pc_set != NULL) {
                        struct ptlrpc_request_set *set = pc->pc_set;

                        cfs_spin_lock(&pc->pc_lock);
                        pc->pc_set = NULL;
                        cfs_spin_unlock(&pc->pc_lock);
                        ptlrpc_set_destroy(set);
                }
                if (env != 0)
                        lu_context_fini(&pc->pc_env.le_ctx);
                cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
#else
                SET_BUT_UNUSED(env);
#endif
                cfs_clear_bit(LIOD_START, &pc->pc_flags);
        }
        RETURN(rc);
}

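/**
 * Stop the thread behind \a pc and release its request set. If \a force
 * is set, in-flight RPCs on the set are aborted rather than drained.
 */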
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        cfs_wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);

        cfs_spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        cfs_spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

out:
#ifdef __KERNEL__
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
#endif
        EXIT;
}

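/**
 * Stop all ptlrpcd threads and free the ptlrpcds descriptor. Called when
 * the last user drops its reference.
 */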
static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}

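/**
 * Size the thread pool from the online CPU count (capped by the
 * max_ptlrpcds module parameter), then start the recovery thread and one
 * ptlrpcd per slot.
 */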
static int ptlrpcd_init(void)
{
        int nthreads = cfs_num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

#ifdef __KERNEL__
        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */
#else
        nthreads = 1;
#endif

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process any
         *      non-recovery async RPC to improve overall async RPC efficiency.
         *
         *      But there are some issues when async I/O RPCs and async non-I/O
         *      RPCs are processed in the same set in some cases: the ptlrpcd
         *      may be blocked by some async I/O RPC(s), and then other async
         *      non-I/O RPC(s) cannot be processed in time.
         *
         *      Maybe we should distinguish blocked async RPCs from non-blocked
         *      async RPCs, and process them in different ptlrpcd sets to avoid
         *      unnecessary dependency. But how to distribute the async RPC
         *      load among all the ptlrpc daemons then becomes another
         *      trouble. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}

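/**
 * Take a reference on the ptlrpcd service; the first caller triggers
 * ptlrpcd_init().
 */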
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        cfs_mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users == 1)
                rc = ptlrpcd_init();
        cfs_mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}

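/**
 * Drop a reference on the ptlrpcd service; the last caller triggers
 * ptlrpcd_fini().
 */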
void ptlrpcd_decref(void)
{
        cfs_mutex_down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        cfs_mutex_up(&ptlrpcd_sem);
}
/** @} ptlrpcd */