/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set where other users
 * may add requests when they do not want to wait for their completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!) within
 * such a completion handler, or a deadlock may occur: ptlrpcd would enter a
 * callback that sends another RPC and waits for it to return, during which
 * time ptlrpcd is completely blocked; so, for example, if an import fails,
 * recovery cannot progress because connection requests are also sent by
 * ptlrpcd.
 *
 * @{
 */
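
/*
 * Example (illustrative sketch only, not compiled as part of this file):
 * a typical caller fills in a prepared request, attaches a completion
 * callback, and queues it on ptlrpcd instead of waiting synchronously.
 * "my_interpret" is a hypothetical ptlrpc_interpterer_t callback; per the
 * note above it must return quickly and never wait on another RPC:
 *
 *      req->rq_interpret_reply = my_interpret;
 *      ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */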

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"

struct ptlrpcd {
        int                pd_size;       /* allocated size of this struct */
        int                pd_index;      /* cursor for round-robin thread
                                           * selection */
        int                pd_nthreads;   /* number of threads in pd_threads */
        struct ptlrpcd_ctl pd_thread_rcv; /* dedicated recovery/replay thread */
        struct ptlrpcd_ctl pd_threads[0]; /* the ptlrpcd worker threads */
};

#ifdef __KERNEL__
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");
#endif
static struct ptlrpcd *ptlrpcds;

cfs_mutex_t ptlrpcd_mutex;
static int ptlrpcd_users = 0;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

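/**
 * Select the ptlrpcd thread that should handle \a req according to
 * \a policy and \a index. Requests whose rq_send_state is not
 * LUSTRE_IMP_FULL (e.g. replay and recovery RPCs) are always routed to
 * the dedicated recovery thread, pd_thread_rcv.
 */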
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

#ifdef __KERNEL__
        switch (policy) {
        case PDL_POLICY_SAME:
                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Before the CPU partition patches are available, fall
                 * through and process it the same as "PDL_POLICY_ROUND". */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < cfs_num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == cfs_smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }
#endif /* __KERNEL__ */

        return &ptlrpcds->pd_threads[idx];
}

/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        cfs_list_t *tmp, *pos;
#ifdef __KERNEL__
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;
#endif

        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(pos, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
#else
                cfs_list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                cfs_atomic_dec(&set->set_remaining);
#endif
        }

#ifdef __KERNEL__
        cfs_spin_lock(&new->set_new_req_lock);
        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
        i = cfs_atomic_read(&set->set_remaining);
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        cfs_spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                cfs_waitq_signal(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee that async RPCs are processed ASAP,
                 *      we have no better choice. It may be fixed in the
                 *      future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
        }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
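
/*
 * Example (hedged sketch): drain a locally prepared set into ptlrpcd and
 * let the daemon threads finish the requests asynchronously. "set" and the
 * requests queued on it are hypothetical; all of them must still be in
 * RQ_PHASE_NEW, as required above:
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *      ...queue new requests on "set"...
 *      ptlrpcd_add_rqset(set);
 *      ptlrpc_set_destroy(set);
 */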

#ifdef __KERNEL__
/**
 * Return the number of RPCs transferred from \a src to \a des.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        cfs_spin_lock(&src->set_new_req_lock);
        if (likely(!cfs_list_empty(&src->set_new_requests))) {
                cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        req->rq_set = des;
                }
                cfs_list_splice_init(&src->set_new_requests,
                                     &des->set_requests);
                rc = cfs_atomic_read(&src->set_new_count);
                cfs_atomic_add(rc, &des->set_remaining);
                cfs_atomic_set(&src->set_new_count, 0);
        }
        cfs_spin_unlock(&src->set_new_req_lock);
        return rc;
}
#endif

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        cfs_spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                cfs_spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
                 * linking. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                cfs_spin_unlock(&req->rq_lock);
                cfs_waitq_signal(&req->rq_set->set_waitq);
                return;
        } else {
                cfs_spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
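
/*
 * Example (hypothetical caller, for illustration only): an async RPC can be
 * queued with a placement hint; any pdl_policy_t value for which no better
 * mapping exists degrades safely to round-robin in ptlrpcd_select_pc():
 *
 *      ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
 */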

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        cfs_atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on the ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (cfs_atomic_read(&set->set_new_count)) {
                cfs_spin_lock(&set->set_new_req_lock);
                if (likely(!cfs_list_empty(&set->set_new_requests))) {
                        cfs_list_splice_init(&set->set_new_requests,
                                             &set->set_requests);
                        cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
                                       &set->set_remaining);
                        cfs_atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                cfs_spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to make
         * sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (cfs_atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!cfs_list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = cfs_atomic_read(&set->set_new_count);

#ifdef __KERNEL__
                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                cfs_spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        cfs_spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                cfs_spin_unlock(&partner->pc_lock);

                                if (cfs_atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
#endif
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        cfs_daemonize_ctxt(pc->pc_name);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
        if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < cfs_num_possible_cpus()) {
                        while (!cpu_online(index)) {
                                if (++index >= cfs_num_possible_cpus())
                                        index = 0;
                        }
                        cfs_set_cpus_allowed(cfs_current(),
                                     node_to_cpumask(cpu_to_node(index)));
                }
        }
#endif
        /*
         * XXX So far only the "client" ptlrpcd uses an environment. In
         * the future, the ptlrpcd thread (or a thread-set) has to be given
         * an argument, describing its "scope".
         */
        rc = lu_context_init(&env.le_ctx,
                             LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        cfs_complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight rpcs for the forced stop case.
                 */
                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        cfs_complete(&pc->pc_finishing);

        cfs_clear_bit(LIOD_START, &pc->pc_flags);
        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
        return 0;
}

/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind each ptlrpcd
 *      thread to a specific CPU core. But binding all ptlrpcd threads may
 *      cause response delays when some CPU core(s) are busy with other load.
 *
 *      For example: during "ls -l", some async RPCs for statahead are
 *      assigned to ptlrpcd_0, which is bound to CPU_0, but CPU_0 may be busy
 *      with non-ptlrpcd work, such as the "ls -l" thread itself (we want the
 *      "ls -l" thread, the statahead thread, and the ptlrpcd thread to run
 *      in parallel). In such a case the statahead async RPCs cannot be
 *      processed in time, which is undesirable. It might be better if
 *      ptlrpcd_0 could be re-scheduled on another CPU core, but that breaks
 *      the data transfer policy above.
 *
 *      So we should not blindly avoid cross-CPU data transfer. We make a
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      is for bound mode; each ptlrpcd thread in this part is bound to some
 *      CPU core. The other part is for free mode; all the ptlrpcd threads in
 *      this part can be scheduled on any CPU core. We specify a partnership
 *      between bound mode ptlrpcd thread(s) and free mode ptlrpcd thread(s),
 *      and the async RPC load is shared among the partners.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound mode
 *      ptlrpcd thread can be scheduled in time), while still guaranteeing
 *      that async RPCs are processed ASAP (since the free mode ptlrpcd
 *      thread can be scheduled on any CPU core).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
 *      <free bound> pairs. In the future, we can specify more complex
 *      partnerships based on the patches for CPU partitions. But before such
 *      patches are available, we prefer to use the simplest form.
 */
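/* Illustration (assuming 4 ptlrpcd threads under the default
 *      PDB_POLICY_PAIR): each odd-indexed thread is bound and partnered
 *      with the preceding free mode thread:
 *
 *      ptlrpcd_0 (free)  <--partner-->  ptlrpcd_1 (bound)
 *      ptlrpcd_2 (free)  <--partner-->  ptlrpcd_3 (bound)
 */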
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
        struct ptlrpcd_ctl *ppc;
        int node, i, pidx;
        cpumask_t mask;
#endif
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
                node = cpu_to_node(index);
                mask = node_to_cpumask(node);
                for (i = max; i < cfs_num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
#else
                LASSERT(max >= 3);
                pc->pc_npartners = 2;
#endif
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
                                /* Partners are cores in the same NUMA node.
                                 * Set up partnership only with ptlrpcd
                                 * threads that are already initialized.
                                 */
                                for (pidx = 0, i = 0; i < index; i++) {
                                        if (cpu_isset(i, mask)) {
                                                ppc = &ptlrpcds->pd_threads[i];
                                                pc->pc_partners[pidx++] = ppc;
                                                ppc->pc_partners[ppc->
                                                          pc_npartners++] = pc;
                                        }
                                }
                                /* Adjust the number of partners to the number
                                 * of partnerships really set up. */
                                pc->pc_npartners = pidx;
#else
                                if (index & 0x1)
                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                pc_partners[0] = pc;
                                        }
                                }
#endif
                                break;
                        }
                }
        }

        RETURN(rc);
}

#else /* !__KERNEL__ */

/**
 * In liblustre we do not have separate threads, so this function
 * is called from time to time all across common code to see
 * if something needs to be processed on the ptlrpcd set.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                }
        }

        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif

int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        int env = 0;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        cfs_init_completion(&pc->pc_starting);
        cfs_init_completion(&pc->pc_finishing);
        cfs_spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only the "client" ptlrpcd uses an environment. In the
         * future, the ptlrpcd thread (or a thread-set) has to be given an
         * argument, describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

        env = 1;
#ifdef __KERNEL__
        if (index >= 0) {
                rc = ptlrpcd_bind(index, max);
                if (rc < 0)
                        GOTO(out, rc);
        }

        rc = cfs_create_thread(ptlrpcd, pc, 0);
        if (rc < 0)
                GOTO(out, rc);

        rc = 0;
        cfs_wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc) {
#ifdef __KERNEL__
                if (pc->pc_set != NULL) {
                        struct ptlrpc_request_set *set = pc->pc_set;

                        cfs_spin_lock(&pc->pc_lock);
                        pc->pc_set = NULL;
                        cfs_spin_unlock(&pc->pc_lock);
                        ptlrpc_set_destroy(set);
                }
                if (env != 0)
                        lu_context_fini(&pc->pc_env.le_ctx);
                cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
#else
                SET_BUT_UNUSED(env);
#endif
                cfs_clear_bit(LIOD_START, &pc->pc_flags);
        }
        RETURN(rc);
}

void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        cfs_wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);

        cfs_spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        cfs_spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

out:
#ifdef __KERNEL__
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
#endif
        EXIT;
}

static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}

static int ptlrpcd_init(void)
{
        int nthreads = cfs_num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

#ifdef __KERNEL__
        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */
#else
        nthreads = 1;
#endif
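
        /* For example (illustrative numbers): with 8 online CPUs and
         * max_ptlrpcds unset, nthreads stays 8; with 7 online CPUs under
         * the default PDB_POLICY_PAIR binding, nthreads is rounded down to
         * 6 so that every bound thread gets a free mode partner. */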

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process any
         *      non-recovery async RPC to improve overall async RPC efficiency.
         *
         *      But there are some issues with async I/O RPCs and async non-I/O
         *      RPCs processed in the same set under some cases. The ptlrpcd
         *      may be blocked by some async I/O RPC(s), which then prevents
         *      other async non-I/O RPC(s) from being processed in time.
         *
         *      Maybe we should distinguish blocking async RPCs from
         *      non-blocking ones, and process them in different ptlrpcd sets
         *      to avoid unnecessary dependency. But how to distribute the
         *      async RPC load among all the ptlrpc daemons then becomes
         *      another problem. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}

int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        cfs_mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1)
                rc = ptlrpcd_init();
        cfs_mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
        cfs_mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        cfs_mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
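
/*
 * Example (illustrative sketch, not part of this file): a client obd that
 * needs the ptlrpcd service takes a reference at setup time and drops it
 * at cleanup time, so the daemon threads are started on first use and
 * stopped with the last user:
 *
 *      rc = ptlrpcd_addref();
 *      if (rc != 0)
 *              return rc;
 *      ...
 *      ptlrpcd_decref();
 */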
/** @} ptlrpcd */