/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set to which other
 * users may add requests when they do not want to wait for their
 * completion.
 * ptlrpcd takes care of sending such requests, then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!)
 * within such a completion handler, or a deadlock may occur: ptlrpcd
 * enters a callback that sends another RPC and waits for it to return,
 * during which time ptlrpcd is completely blocked; so, for example, if
 * an import fails, recovery cannot progress because connection requests
 * are also sent by ptlrpcd.
 *
 * @{
 */
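
/*
 * Typical caller pattern, a minimal sketch (not taken from this file;
 * "my_interpret" is a hypothetical completion callback and the request
 * preparation is elided). The callback runs in ptlrpcd context and must
 * not block on RPCs:
 *
 *      static int my_interpret(const struct lu_env *env,
 *                              struct ptlrpc_request *req,
 *                              void *args, int rc)
 *      {
 *              return rc;
 *      }
 *
 *      req->rq_interpret_reply = my_interpret;
 *      ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */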

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"

struct ptlrpcd {
        int                pd_size;
        int                pd_index;
        int                pd_nthreads;
        struct ptlrpcd_ctl pd_thread_rcv;
        struct ptlrpcd_ctl pd_threads[0];
};
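
/*
 * pd_threads[] is a zero-length (flexible) array: struct ptlrpcd is
 * allocated with room for pd_nthreads trailing ptlrpcd_ctl slots, as
 * ptlrpcd_init() does below:
 *
 *      size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
 *      OBD_ALLOC(ptlrpcds, size);
 */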

#ifdef __KERNEL__
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");
#endif
static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

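/**
 * Select the ptlrpcd thread that will handle \a req: requests with
 * rq_send_state != LUSTRE_IMP_FULL (e.g. recovery requests) always go to
 * the recovery thread, while \a policy picks among the regular threads,
 * with \a index used by PDL_POLICY_PREFERRED as the preferred CPU.
 */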
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

#ifdef __KERNEL__
        switch (policy) {
        case PDL_POLICY_SAME:
                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Until the CPU partition patches are available, process
                 * this the same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND until the CPU
                 * partition patches are available. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < cfs_num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == cfs_smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }
#endif /* __KERNEL__ */

        return &ptlrpcds->pd_threads[idx];
}

/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests in the set must be in phase RQ_PHASE_NEW.
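 *
 * A sketch of the expected calling pattern (illustrative only; the
 * request preparation is elided and \a req stands for any new, unsent
 * request):
 * \code
 * struct ptlrpc_request_set *set = ptlrpc_prep_set();
 * ptlrpc_set_add_req(set, req);
 * ptlrpcd_add_rqset(set);
 * \endcode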
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        cfs_list_t *tmp, *pos;
#ifdef __KERNEL__
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;
#endif

        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(pos, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
#else
                cfs_list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                cfs_atomic_dec(&set->set_remaining);
#endif
        }

#ifdef __KERNEL__
        spin_lock(&new->set_new_req_lock);
        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
        i = cfs_atomic_read(&set->set_remaining);
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                cfs_waitq_signal(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners,
                 *      but to guarantee that async RPCs are processed ASAP
                 *      we have no better choice. This may be improved in
                 *      the future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
        }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);

#ifdef __KERNEL__
/**
 * Return the number of RPCs transferred from \a src to \a des.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        spin_lock(&src->set_new_req_lock);
        if (likely(!cfs_list_empty(&src->set_new_requests))) {
                cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        req->rq_set = des;
                }
                cfs_list_splice_init(&src->set_new_requests,
                                     &des->set_requests);
                rc = cfs_atomic_read(&src->set_new_count);
                cfs_atomic_add(rc, &des->set_remaining);
                cfs_atomic_set(&src->set_new_count, 0);
        }
        spin_unlock(&src->set_new_req_lock);
        return rc;
}
#endif

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
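 *
 * \param[in] req    the request to queue
 * \param[in] policy thread-selection policy, see ptlrpcd_select_pc()
 * \param[in] idx    preferred CPU index for PDL_POLICY_PREFERRED, or -1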
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid
                 * being linked into a set twice. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
                cfs_waitq_signal(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        cfs_atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (cfs_atomic_read(&set->set_new_count)) {
                spin_lock(&set->set_new_req_lock);
                if (likely(!cfs_list_empty(&set->set_new_requests))) {
                        cfs_list_splice_init(&set->set_new_requests,
                                             &set->set_requests);
                        cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
                                       &set->set_remaining);
                        cfs_atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to
         * make sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (cfs_atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!cfs_list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = cfs_atomic_read(&set->set_new_count);

#ifdef __KERNEL__
                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                spin_unlock(&partner->pc_lock);

                                if (cfs_atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
#endif
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set containing the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        unshare_fs_struct();
#if defined(CONFIG_SMP) && \
(defined(HAVE_CPUMASK_OF_NODE) || defined(HAVE_NODE_TO_CPUMASK))
        if (test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < cfs_num_possible_cpus()) {
                        while (!cpu_online(index)) {
                                if (++index >= cfs_num_possible_cpus())
                                        index = 0;
                        }
                        cfs_set_cpus_allowed(cfs_current(),
                                     *cpumask_of_node(cpu_to_node(index)));
                }
        }
#endif
        /*
         * XXX So far only the "client" ptlrpcd uses an environment. In
         * the future, the ptlrpcd thread (or a thread-set) has to be
         * given an argument describing its "scope".
         */
        rc = lu_context_init(&env.le_ctx,
                             LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);

        complete(&pc->pc_finishing);

        return 0;
}

/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      several ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind each ptlrpcd
 *      thread to a specific core. But binding all ptlrpcd threads may cause
 *      response delays when some CPU core(s) are busy with other load.
 *
 *      For example, during "ls -l" some async RPCs for statahead are
 *      assigned to ptlrpcd_0, which is bound to CPU_0, but CPU_0 may be
 *      quite busy with non-ptlrpcd work such as the "ls -l" thread itself
 *      (we want the "ls -l" thread, the statahead thread and the ptlrpcd
 *      thread to run in parallel). In that case the statahead async RPCs
 *      are not processed in time, which is unexpected. If ptlrpcd_0 could
 *      be rescheduled on another CPU core it might do better, but that
 *      breaks the data transfer policy above.
 *
 *      So we should not blindly avoid cross-CPU data transfer. Instead we
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      runs in bound mode, where each ptlrpcd thread is bound to some CPU
 *      core; the other part runs in free mode, where threads can be
 *      scheduled on any CPU core. We specify a partnership between bound
 *      mode ptlrpcd thread(s) and free mode ptlrpcd thread(s), and the
 *      async RPC load within a partnership is shared.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time) while still trying to
 *      guarantee that async RPCs are processed ASAP (since the free mode
 *      ptlrpcd thread can be scheduled on any CPU core).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is a
 *      <free, bound> pair. In the future we can specify a more complex
 *      partnership based on the CPU partition patches, but until those
 *      patches are available we prefer the simplest scheme.
 */
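
/*
 * Worked example (grounded in ptlrpcd_bind() below): with nthreads == 4
 * and PDB_POLICY_PAIR, the pairs are <ptlrpcd_0, ptlrpcd_1> and
 * <ptlrpcd_2, ptlrpcd_3>. The odd-indexed thread of each pair gets
 * LIOD_BIND set (CPU-bound) while the even-indexed one stays free, and
 * each thread is the other's single entry in pc_partners[].
 */
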
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
#if defined(CONFIG_NUMA) && \
(defined(HAVE_CPUMASK_OF_NODE) || defined(HAVE_NODE_TO_CPUMASK))
        cpumask_t mask;
#endif
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && \
(defined(HAVE_CPUMASK_OF_NODE) || defined(HAVE_NODE_TO_CPUMASK))
        {
                int i;
                mask = *cpumask_of_node(cpu_to_node(index));
                for (i = max; i < cfs_num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                set_bit(LIOD_BIND, &pc->pc_flags);
        }
#else
                LASSERT(max >= 3);
                pc->pc_npartners = 2;
#endif
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && \
(defined(HAVE_CPUMASK_OF_NODE) || defined(HAVE_NODE_TO_CPUMASK))
                        {
                                struct ptlrpcd_ctl *ppc;
                                int i, pidx;
                                /* Partners are cores in the same NUMA node.
                                 * Set up the partnership only with ptlrpcd
                                 * threads that are already initialized.
                                 */
                                for (pidx = 0, i = 0; i < index; i++) {
                                        if (cpu_isset(i, mask)) {
                                                ppc = &ptlrpcds->pd_threads[i];
                                                pc->pc_partners[pidx++] = ppc;
                                                ppc->pc_partners[ppc->
                                                          pc_npartners++] = pc;
                                        }
                                }
                                /* Adjust the number of partners to the
                                 * number of partnerships actually set up. */
                                pc->pc_npartners = pidx;
                        }
#else
                                if (index & 0x1)
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                pc_partners[0] = pc;
                                        }
                                }
#endif
                                break;
                        }
                }
        }

        RETURN(rc);
}

#else /* !__KERNEL__ */

/**
 * In liblustre we do not have separate threads, so this function
 * is called from time to time all across common code to see
 * if something needs to be processed on ptlrpcd set.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                }
        }

        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif

int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        int env = 0;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

        env = 1;
#ifdef __KERNEL__
        {
                cfs_task_t *task;
                if (index >= 0) {
                        rc = ptlrpcd_bind(index, max);
                        if (rc < 0)
                                GOTO(out, rc);
                }

                task = kthread_run(ptlrpcd, pc, pc->pc_name);
                if (IS_ERR(task))
                        GOTO(out, rc = PTR_ERR(task));

                rc = 0;
                wait_for_completion(&pc->pc_starting);
        }
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc) {
#ifdef __KERNEL__
                if (pc->pc_set != NULL) {
                        struct ptlrpc_request_set *set = pc->pc_set;

                        spin_lock(&pc->pc_lock);
                        pc->pc_set = NULL;
                        spin_unlock(&pc->pc_lock);
                        ptlrpc_set_destroy(set);
                }
                if (env != 0)
                        lu_context_fini(&pc->pc_env.le_ctx);
                clear_bit(LIOD_BIND, &pc->pc_flags);
#else
                SET_BUT_UNUSED(env);
#endif
                clear_bit(LIOD_START, &pc->pc_flags);
        }
        RETURN(rc);
}

void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);

out:
        EXIT;
}

void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);

        spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        clear_bit(LIOD_BIND, &pc->pc_flags);

out:
#ifdef __KERNEL__
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
#endif
        EXIT;
}

static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[i]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}

static int ptlrpcd_init(void)
{
        int nthreads = cfs_num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

#ifdef __KERNEL__
        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */
#else
        nthreads = 1;
#endif

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process
         *      any non-recovery async RPC, to improve overall async RPC
         *      efficiency.
         *
         *      But there are some issues with async I/O RPCs and async
         *      non-I/O RPCs being processed in the same set in some cases:
         *      the ptlrpcd may be blocked by some async I/O RPC(s), which
         *      then prevents other async non-I/O RPC(s) from being
         *      processed in time.
         *
         *      Maybe we should distinguish blocking async RPCs from
         *      non-blocking async RPCs and process them in different ptlrpcd
         *      sets to avoid the unnecessary dependency. But how to
         *      distribute the async RPC load among all the ptlrpc daemons
         *      then becomes another problem. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                for (j = 0; j <= i; j++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[j]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}

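/**
 * Take a module-wide reference on the ptlrpcd thread pool; the threads
 * are started on first use. Callers must pair this with ptlrpcd_decref().
 * A minimal sketch of the expected pairing (the caller code is
 * hypothetical):
 * \code
 * rc = ptlrpcd_addref();
 * if (rc == 0) {
 *         ... queue requests with ptlrpcd_add_req() ...
 *         ptlrpcd_decref();
 * }
 * \endcode
 */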
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1)
                rc = ptlrpcd_init();
        mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
        mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */