/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lu_object.h>
#include <lnet/types.h>
#include "ptlrpc_internal.h"

/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
                "set non-zero to put pressure on request buffer pools");
CFS_MODULE_PARM(at_min, "i", int, 0644,
                "Adaptive timeout minimum (sec)");
CFS_MODULE_PARM(at_max, "i", int, 0644,
                "Adaptive timeout maximum (sec)");
CFS_MODULE_PARM(at_history, "i", int, 0644,
                "Adaptive timeouts remember the slowest event that took place "
                "within this period (sec)");
CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
                "How soon before an RPC deadline to send an early reply");
CFS_MODULE_PARM(at_extra, "i", int, 0644,
                "How much extra time to give with each early reply");
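
/*
 * For illustration only: with the 0644 permissions above, these tunables
 * can be read and adjusted at runtime through sysfs on a typical Linux
 * install (paths assume the module is loaded as "ptlrpc"):
 *
 *   cat /sys/module/ptlrpc/parameters/at_max
 *   echo 40 > /sys/module/ptlrpc/parameters/at_min
 *
 * test_req_buffer_pressure is registered read-only (0444), so it can only
 * be set at load time, e.g. "modprobe ptlrpc test_req_buffer_pressure=1".
 */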

/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);

/** Holds a list of all PTLRPC services */
CFS_LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;

struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt);
        if (rqbd == NULL)
                return NULL;

        rqbd->rqbd_svcpt = svcpt;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
        OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
                            svcpt->scp_cpt, svc->srv_buf_size);
        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE_PTR(rqbd);
                return NULL;
        }

        spin_lock(&svcpt->scp_lock);
        cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
        svcpt->scp_nrqbds_total++;
        spin_unlock(&svcpt->scp_lock);

        return rqbd;
}

void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;

        LASSERT(rqbd->rqbd_refcount == 0);
        LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));

        spin_lock(&svcpt->scp_lock);
        cfs_list_del(&rqbd->rqbd_list);
        svcpt->scp_nrqbds_total--;
        spin_unlock(&svcpt->scp_lock);

        OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
        OBD_FREE_PTR(rqbd);
}

int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                rc = 0;
        int                                i;

        if (svcpt->scp_rqbd_allocating)
                goto try_post;

        spin_lock(&svcpt->scp_lock);
        /* check again with lock */
        if (svcpt->scp_rqbd_allocating) {
                /* NB: we might allow more than one thread in the future */
                LASSERT(svcpt->scp_rqbd_allocating == 1);
                spin_unlock(&svcpt->scp_lock);
                goto try_post;
        }

        svcpt->scp_rqbd_allocating++;
        spin_unlock(&svcpt->scp_lock);

        for (i = 0; i < svc->srv_nbuf_per_group; i++) {
                /* NB: another thread might have recycled enough rqbds, we
                 * need to make sure it wouldn't over-allocate, see LU-1212. */
                if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
                        break;

                rqbd = ptlrpc_alloc_rqbd(svcpt);

                if (rqbd == NULL) {
                        CERROR("%s: Can't allocate request buffer\n",
                               svc->srv_name);
                        rc = -ENOMEM;
                        break;
                }
        }

        spin_lock(&svcpt->scp_lock);

        LASSERT(svcpt->scp_rqbd_allocating == 1);
        svcpt->scp_rqbd_allocating--;

        spin_unlock(&svcpt->scp_lock);

        CDEBUG(D_RPCTRACE,
               "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
               svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
               svcpt->scp_nrqbds_total, rc);

 try_post:
        if (post && rc == 0)
                rc = ptlrpc_server_post_idle_rqbds(svcpt);

        return rc;
}

/**
 * Part of Rep-Ack logic.
 * Puts a lock and its mode into the reply state associated with the
 * request reply.
 */
void
ptlrpc_save_lock(struct ptlrpc_request *req,
                 struct lustre_handle *lock, int mode, int no_ack)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT(rs != NULL);
        LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);

        if (req->rq_export->exp_disconnected) {
                ldlm_lock_decref(lock, mode);
        } else {
                idx = rs->rs_nlocks++;
                rs->rs_locks[idx] = *lock;
                rs->rs_modes[idx] = mode;
                rs->rs_difficult = 1;
                rs->rs_no_ack = !!no_ack;
        }
}
EXPORT_SYMBOL(ptlrpc_save_lock);

#ifdef __KERNEL__

struct ptlrpc_hr_partition;

struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
        spinlock_t                      hrt_lock;
        wait_queue_head_t               hrt_waitq;
        cfs_list_t                      hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition      *hrt_partition;
};

struct ptlrpc_hr_partition {
        /* # of started threads */
        cfs_atomic_t                    hrp_nstarted;
        /* # of stopped threads */
        cfs_atomic_t                    hrp_nstopped;
        /* cpu partition id */
        int                             hrp_cpt;
        /* round-robin rotor for choosing thread */
        int                             hrp_rotor;
        /* total number of threads on this partition */
        int                             hrp_nthrs;
        /* threads table */
        struct ptlrpc_hr_thread         *hrp_thrs;
};

#define HRT_RUNNING 0
#define HRT_STOPPING 1

struct ptlrpc_hr_service {
        /* CPU partition table, it's just cfs_cpt_table for now */
        struct cfs_cpt_table            *hr_cpt_table;
        /** controller sleep waitq */
        wait_queue_head_t               hr_waitq;
        unsigned int                    hr_stopping;
        /** round-robin rotor for non-affinity service */
        unsigned int                    hr_rotor;
        /* partition data */
        struct ptlrpc_hr_partition      **hr_partitions;
};

struct rs_batch {
        cfs_list_t                      rsb_replies;
        unsigned int                    rsb_n_replies;
        struct ptlrpc_service_part      *rsb_svcpt;
};

/** reply handling service. */
static struct ptlrpc_hr_service         ptlrpc_hr;

/**
 * maximum number of replies scheduled in one batch
 */
#define MAX_SCHEDULED 256

/**
 * Initialize a reply batch.
 *
 * \param b batch
 */
static void rs_batch_init(struct rs_batch *b)
{
        memset(b, 0, sizeof(*b));
        CFS_INIT_LIST_HEAD(&b->rsb_replies);
}

/**
 * Choose an hr thread to dispatch requests to.
 */
static struct ptlrpc_hr_thread *
ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_hr_partition      *hrp;
        unsigned int                    rotor;

        if (svcpt->scp_cpt >= 0 &&
            svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
                /* directly match partition */
                hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];

        } else {
                rotor = ptlrpc_hr.hr_rotor++;
                rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);

                hrp = ptlrpc_hr.hr_partitions[rotor];
        }

        rotor = hrp->hrp_rotor++;
        return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
}
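
/*
 * Example of the selection above (hypothetical numbers): a CPT-affine
 * service on partition 2 always maps to hr_partitions[2]; a non-affine
 * service (scp_cpt == CFS_CPT_ANY) walks hr_rotor round-robin across all
 * partitions.  Within a partition with hrp_nthrs == 4, successive calls
 * return thread slots 0, 1, 2, 3, 0, ... as hrp_rotor increments.
 */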

/**
 * Dispatch all replies accumulated in the batch to one of the
 * dedicated reply handling threads.
 *
 * \param b batch
 */
static void rs_batch_dispatch(struct rs_batch *b)
{
        if (b->rsb_n_replies != 0) {
                struct ptlrpc_hr_thread *hrt;

                hrt = ptlrpc_hr_select(b->rsb_svcpt);

                spin_lock(&hrt->hrt_lock);
                cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
                spin_unlock(&hrt->hrt_lock);

                wake_up(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
        }
}

/**
 * Add a reply to a batch.
 * Add one reply object to a batch; schedule the batched replies if the
 * batch is full.
 *
 * \param b batch
 * \param rs reply
 */
static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

        if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
                if (b->rsb_svcpt != NULL) {
                        rs_batch_dispatch(b);
                        spin_unlock(&b->rsb_svcpt->scp_rep_lock);
                }
                spin_lock(&svcpt->scp_rep_lock);
                b->rsb_svcpt = svcpt;
        }
        spin_lock(&rs->rs_lock);
        rs->rs_scheduled_ever = 1;
        if (rs->rs_scheduled == 0) {
                cfs_list_move(&rs->rs_list, &b->rsb_replies);
                rs->rs_scheduled = 1;
                b->rsb_n_replies++;
        }
        rs->rs_committed = 1;
        spin_unlock(&rs->rs_lock);
}

/**
 * Reply batch finalization.
 * Dispatch remaining replies from the batch
 * and release the remaining spinlock.
 *
 * \param b batch
 */
static void rs_batch_fini(struct rs_batch *b)
{
        if (b->rsb_svcpt != NULL) {
                rs_batch_dispatch(b);
                spin_unlock(&b->rsb_svcpt->scp_rep_lock);
        }
}

#define DECLARE_RS_BATCH(b)     struct rs_batch b

#else /* __KERNEL__ */

#define rs_batch_init(b)        do {} while (0)
#define rs_batch_fini(b)        do {} while (0)
#define rs_batch_add(b, r)      ptlrpc_schedule_difficult_reply(r)
#define DECLARE_RS_BATCH(b)

#endif /* __KERNEL__ */
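
/*
 * Sketch of the intended rs_batch lifecycle: rs_batch_add() takes and
 * holds the owning svcpt's scp_rep_lock until the batch moves to another
 * svcpt or rs_batch_fini() drops it.  This mirrors the real caller,
 * ptlrpc_commit_replies() below:
 *
 *   DECLARE_RS_BATCH(batch);
 *
 *   rs_batch_init(&batch);
 *   ...
 *   rs_batch_add(&batch, rs);    // may dispatch if the batch is full
 *   ...
 *   rs_batch_fini(&batch);       // dispatch leftovers, release the lock
 */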

/**
 * Put reply state into a queue for processing because we received
 * ACK from the client
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
#ifdef __KERNEL__
        struct ptlrpc_hr_thread *hrt;
        ENTRY;

        LASSERT(cfs_list_empty(&rs->rs_list));

        hrt = ptlrpc_hr_select(rs->rs_svcpt);

        spin_lock(&hrt->hrt_lock);
        cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
        spin_unlock(&hrt->hrt_lock);

        wake_up(&hrt->hrt_waitq);
        EXIT;
#else
        cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
#endif
}

void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
        LASSERT(spin_is_locked(&rs->rs_lock));
        LASSERT(rs->rs_difficult);
        rs->rs_scheduled_ever = 1;  /* flag any notification attempt */

        if (rs->rs_scheduled) {     /* being set up or already notified */
                EXIT;
                return;
        }

        rs->rs_scheduled = 1;
        cfs_list_del_init(&rs->rs_list);
        ptlrpc_dispatch_difficult_reply(rs);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);

void ptlrpc_commit_replies(struct obd_export *exp)
{
        struct ptlrpc_reply_state *rs, *nxt;
        DECLARE_RS_BATCH(batch);
        ENTRY;

        rs_batch_init(&batch);
        /* Find any replies that have been committed and get their service
         * to attend to complete them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock(&exp->exp_uncommitted_replies_lock);
        cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
                                     rs_obd_list) {
                LASSERT(rs->rs_difficult);
                /* VBR: per-export last_committed */
                LASSERT(rs->rs_export);
                if (rs->rs_transno <= exp->exp_last_committed) {
                        cfs_list_del_init(&rs->rs_obd_list);
                        rs_batch_add(&batch, rs);
                }
        }
        spin_unlock(&exp->exp_uncommitted_replies_lock);
        rs_batch_fini(&batch);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_commit_replies);

static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_request_buffer_desc *rqbd;
        int                               rc;
        int                               posted = 0;

        for (;;) {
                spin_lock(&svcpt->scp_lock);

                if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
                        spin_unlock(&svcpt->scp_lock);
                        return posted;
                }

                rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
                                      struct ptlrpc_request_buffer_desc,
                                      rqbd_list);
                cfs_list_del(&rqbd->rqbd_list);

                /* assume we will post successfully */
                svcpt->scp_nrqbds_posted++;
                cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);

                spin_unlock(&svcpt->scp_lock);

                rc = ptlrpc_register_rqbd(rqbd);
                if (rc != 0)
                        break;

                posted = 1;
        }

        spin_lock(&svcpt->scp_lock);

        svcpt->scp_nrqbds_posted--;
        cfs_list_del(&rqbd->rqbd_list);
        cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);

        /* Don't complain if no request buffers are posted right now; LNET
         * won't drop requests because we set the portal lazy! */

        spin_unlock(&svcpt->scp_lock);

        return -1;
}

static void ptlrpc_at_timer(unsigned long castmeharder)
{
        struct ptlrpc_service_part *svcpt;

        svcpt = (struct ptlrpc_service_part *)castmeharder;

        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
        wake_up(&svcpt->scp_waitq);
}

static void
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                             struct ptlrpc_service_conf *conf)
{
#ifdef __KERNEL__
        struct ptlrpc_service_thr_conf  *tc = &conf->psc_thr;
        unsigned                        init;
        unsigned                        total;
        unsigned                        nthrs;
        int                             weight;

        /*
         * Common code for estimating & validating the number of threads.
         * A CPT-affinity service may have a per-CPT thread pool instead
         * of a global thread pool, so users might not always get the
         * thread count they asked for in conf::tc_nthrs_user, even when
         * they did set it. This is because we must validate the thread
         * count for each CPT to guarantee each pool will have enough
         * threads to keep the service healthy.
         */
        init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
        init = max_t(int, init, tc->tc_nthrs_init);

        /* NB: please see comments in lustre_lnet.h for definition
         * details of these members */
        LASSERT(tc->tc_nthrs_max != 0);

        if (tc->tc_nthrs_user != 0) {
                /* In case there is a reason to test a service with many
                 * threads, we give a less strict check here; it can
                 * be up to 8 * nthrs_max */
                total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
                nthrs = total / svc->srv_ncpts;
                init  = max(init, nthrs);
                goto out;
        }

        total = tc->tc_nthrs_max;
        if (tc->tc_nthrs_base == 0) {
                /* don't care about the base thread count per partition;
                 * this is mostly for non-affinity services */
                nthrs = total / svc->srv_ncpts;
                goto out;
        }

        nthrs = tc->tc_nthrs_base;
        if (svc->srv_ncpts == 1) {
                int     i;

                /* NB: Increase the base number if it's a single partition
                 * and the total number of cores/HTs is larger than or equal
                 * to 4. The result will always be < 2 * nthrs_base */
                weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
                for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
                            (tc->tc_nthrs_base >> i) != 0; i++)
                        nthrs += tc->tc_nthrs_base >> i;
        }

        if (tc->tc_thr_factor != 0) {
                int       factor = tc->tc_thr_factor;
                const int fade = 4;

                /*
                 * The user wants to increase the number of threads for
                 * each CPU core/HT; most likely the factor is larger than
                 * one thread per core because service threads are expected
                 * to be blocked on locks or waiting for IO.
                 */
                /*
                 * Amdahl's law says that adding processors doesn't give
                 * a linear increase in parallelism, so it's nonsense to
                 * have too many threads no matter how many cores/HTs
                 * there are.
                 */
                if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
                        /* depress thread factor for hyper-threading */
                        factor = factor - (factor >> 1) + (factor >> 3);
                }

                weight = cfs_cpt_weight(svc->srv_cptable, 0);
                LASSERT(weight > 0);

                for (; factor > 0 && weight > 0; factor--, weight -= fade)
                        nthrs += min(weight, fade) * factor;
        }

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                nthrs = max(tc->tc_nthrs_base,
                            tc->tc_nthrs_max / svc->srv_ncpts);
        }
 out:
        nthrs = max(nthrs, tc->tc_nthrs_init);
        svc->srv_nthrs_cpt_limit = nthrs;
        svc->srv_nthrs_cpt_init = init;

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
                       "than the given soft limit (%d)\n",
                       svc->srv_name, nthrs * svc->srv_ncpts,
                       tc->tc_nthrs_max);
        }
#endif
}
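
/*
 * Worked example for the estimate above (hypothetical numbers, single
 * partition, no hyper-threading depression): with tc_nthrs_base = 8 and
 * weight = 16 cores, the doubling loop adds 8>>1 + 8>>2 + 8>>3 = 4+2+1,
 * giving nthrs = 15 (always < 2 * nthrs_base).  With tc_thr_factor = 4
 * and fade = 4, the factor loop then adds min(weight, fade) * factor as
 * both fade: 4*4 + 4*3 + 4*2 + 4*1 = 40, for nthrs = 55 before the
 * tc_nthrs_max clamp is applied.
 */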

/**
 * Initialize per-CPT data for a service
 */
static int
ptlrpc_service_part_init(struct ptlrpc_service *svc,
                         struct ptlrpc_service_part *svcpt, int cpt)
{
        struct ptlrpc_at_array  *array;
        int                     size;
        int                     index;
        int                     rc;

        svcpt->scp_cpt = cpt;
        CFS_INIT_LIST_HEAD(&svcpt->scp_threads);

        /* rqbd and incoming request queue */
        spin_lock_init(&svcpt->scp_lock);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
        init_waitqueue_head(&svcpt->scp_waitq);
        /* history request & rqbd list */
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);

        /* active requests and hp requests */
        spin_lock_init(&svcpt->scp_req_lock);

        /* reply states */
        spin_lock_init(&svcpt->scp_rep_lock);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
#ifndef __KERNEL__
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
#endif
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
        init_waitqueue_head(&svcpt->scp_rep_waitq);
        cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);

        /* adaptive timeout */
        spin_lock_init(&svcpt->scp_at_lock);
        array = &svcpt->scp_at_array;

        size = at_est2timeout(at_max);
        array->paa_size     = size;
        array->paa_count    = 0;
        array->paa_deadline = -1;

        /* allocate memory for scp_at_array (ptlrpc_at_array) */
        OBD_CPT_ALLOC(array->paa_reqs_array,
                      svc->srv_cptable, cpt, sizeof(cfs_list_t) * size);
        if (array->paa_reqs_array == NULL)
                return -ENOMEM;

        for (index = 0; index < size; index++)
                CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);

        OBD_CPT_ALLOC(array->paa_reqs_count,
                      svc->srv_cptable, cpt, sizeof(__u32) * size);
        if (array->paa_reqs_count == NULL)
                goto failed;

        cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
        /* At SOW, service time should be quick; 10s seems generous. If the
         * client timeout is less than this, we'll be sending an early reply. */
        at_init(&svcpt->scp_at_estimate, 10, 0);

        /* assign this before calling ptlrpc_grow_req_bufs */
        svcpt->scp_service = svc;
        /* Now allocate the request buffers, but don't post them now */
        rc = ptlrpc_grow_req_bufs(svcpt, 0);
        /* We shouldn't be under memory pressure at startup, so
         * fail if we can't allocate all our buffers at this time. */
        if (rc != 0)
                goto failed;

        return 0;

 failed:
        if (array->paa_reqs_count != NULL) {
                OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
                array->paa_reqs_count = NULL;
        }

        if (array->paa_reqs_array != NULL) {
                OBD_FREE(array->paa_reqs_array,
                         sizeof(cfs_list_t) * array->paa_size);
                array->paa_reqs_array = NULL;
        }

        return -ENOMEM;
}

/**
 * Initialize service on a given portal.
 * This includes starting serving threads, allocating and posting rqbds and
 * so on.
 */
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
                        struct proc_dir_entry *proc_entry)
{
        struct ptlrpc_service_cpt_conf  *cconf = &conf->psc_cpt;
        struct ptlrpc_service           *service;
        struct ptlrpc_service_part      *svcpt;
        struct cfs_cpt_table            *cptable;
        __u32                           *cpts = NULL;
        int                             ncpts;
        int                             cpt;
        int                             rc;
        int                             i;
        ENTRY;

        LASSERT(conf->psc_buf.bc_nbufs > 0);
        LASSERT(conf->psc_buf.bc_buf_size >=
                conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
        LASSERT(conf->psc_thr.tc_ctx_tags != 0);

        cptable = cconf->cc_cptable;
        if (cptable == NULL)
                cptable = cfs_cpt_table;

        if (!conf->psc_thr.tc_cpu_affinity) {
                ncpts = 1;
        } else {
                ncpts = cfs_cpt_number(cptable);
                if (cconf->cc_pattern != NULL) {
                        struct cfs_expr_list    *el;

                        rc = cfs_expr_list_parse(cconf->cc_pattern,
                                                 strlen(cconf->cc_pattern),
                                                 0, ncpts - 1, &el);
                        if (rc != 0) {
                                CERROR("%s: invalid CPT pattern string: %s\n",
                                       conf->psc_name, cconf->cc_pattern);
                                RETURN(ERR_PTR(-EINVAL));
                        }

                        rc = cfs_expr_list_values(el, ncpts, &cpts);
                        cfs_expr_list_free(el);
                        if (rc <= 0) {
                                CERROR("%s: failed to parse CPT array %s: %d\n",
                                       conf->psc_name, cconf->cc_pattern, rc);
                                if (cpts != NULL)
                                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                                RETURN(ERR_PTR(rc < 0 ? rc : -EINVAL));
                        }
                        ncpts = rc;
                }
        }

        OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts]));
        if (service == NULL) {
                if (cpts != NULL)
                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                RETURN(ERR_PTR(-ENOMEM));
        }

        service->srv_cptable            = cptable;
        service->srv_cpts               = cpts;
        service->srv_ncpts              = ncpts;

        service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
        while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
                service->srv_cpt_bits++;

        /* public members */
        spin_lock_init(&service->srv_lock);
        service->srv_name               = conf->psc_name;
        service->srv_watchdog_factor    = conf->psc_watchdog_factor;
        CFS_INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */

        /* buffer configuration */
        service->srv_nbuf_per_group     = test_req_buffer_pressure ?
                                          1 : conf->psc_buf.bc_nbufs;
        service->srv_max_req_size       = conf->psc_buf.bc_req_max_size +
                                          SPTLRPC_MAX_PAYLOAD;
        service->srv_buf_size           = conf->psc_buf.bc_buf_size;
        service->srv_rep_portal         = conf->psc_buf.bc_rep_portal;
        service->srv_req_portal         = conf->psc_buf.bc_req_portal;

        /* Increase max reply size to next power of two */
        service->srv_max_reply_size = 1;
        while (service->srv_max_reply_size <
               conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
                service->srv_max_reply_size <<= 1;

        service->srv_thread_name        = conf->psc_thr.tc_thr_name;
        service->srv_ctx_tags           = conf->psc_thr.tc_ctx_tags;
        service->srv_hpreq_ratio        = PTLRPC_SVC_HP_RATIO;
        service->srv_ops                = conf->psc_ops;

        for (i = 0; i < ncpts; i++) {
                if (!conf->psc_thr.tc_cpu_affinity)
                        cpt = CFS_CPT_ANY;
                else
                        cpt = cpts != NULL ? cpts[i] : i;

                OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt));
                if (svcpt == NULL)
                        GOTO(failed, rc = -ENOMEM);

                service->srv_parts[i] = svcpt;
                rc = ptlrpc_service_part_init(service, svcpt, cpt);
                if (rc != 0)
                        GOTO(failed, rc);
        }

        ptlrpc_server_nthreads_check(service, conf);

        rc = LNetSetLazyPortal(service->srv_req_portal);
        LASSERT(rc == 0);

        mutex_lock(&ptlrpc_all_services_mutex);
        cfs_list_add(&service->srv_list, &ptlrpc_all_services);
        mutex_unlock(&ptlrpc_all_services_mutex);

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        rc = ptlrpc_service_nrs_setup(service);
        if (rc != 0)
                GOTO(failed, rc);

        CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
               service->srv_name, service->srv_req_portal);

#ifdef __KERNEL__
        rc = ptlrpc_start_threads(service);
        if (rc != 0) {
                CERROR("Failed to start threads for service %s: %d\n",
                       service->srv_name, rc);
                GOTO(failed, rc);
        }
#endif

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        RETURN(ERR_PTR(rc));
}
EXPORT_SYMBOL(ptlrpc_register_service);
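
/*
 * Hypothetical caller sketch (field values are illustrative only; real
 * services fill in their own portals, buffer sizes and thread limits, and
 * the handler name below is an assumption, not part of this file):
 *
 *   static struct ptlrpc_service_conf conf = {
 *           .psc_name               = "example_svc",
 *           .psc_watchdog_factor    = 1,
 *           .psc_buf                = { ... portals, buffer sizes ... },
 *           .psc_thr                = { ... thread counts, ctx tags ... },
 *           .psc_ops                = { ... request handler callbacks ... },
 *   };
 *   struct ptlrpc_service *svc;
 *
 *   svc = ptlrpc_register_service(&conf, proc_entry);
 *   if (IS_ERR(svc))
 *           return PTR_ERR(svc);   // on failure an ERR_PTR(rc) is returned
 */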

/**
 * to actually free the request, must be called without holding svc_lock.
 * note it's the caller's responsibility to unlink req->rq_list.
 */
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
        LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
        LASSERT(cfs_list_empty(&req->rq_timed_list));

        /* DEBUG_REQ() assumes the reply state of a request with a valid
         * ref will not be destroyed until that reference is dropped. */
        ptlrpc_req_drop_rs(req);

        sptlrpc_svc_ctx_decref(req);

        if (req != &req->rq_rqbd->rqbd_req) {
                /* NB request buffers use an embedded
                 * req if the incoming req unlinked the
                 * MD; this isn't one of them! */
                ptlrpc_request_cache_free(req);
        }
}

/**
 * drop a reference count of the request. if it reaches 0, we either
 * put it into the history list, or free it immediately.
 */
void ptlrpc_server_drop_request(struct ptlrpc_request *req)
{
        struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *svc = svcpt->scp_service;
        int                                refcount;
        cfs_list_t                        *tmp;
        cfs_list_t                        *nxt;

        if (!cfs_atomic_dec_and_test(&req->rq_refcount))
                return;

        if (req->rq_session.lc_state == LCS_ENTERED) {
                lu_context_exit(&req->rq_session);
                lu_context_fini(&req->rq_session);
        }

        if (req->rq_at_linked) {
                spin_lock(&svcpt->scp_at_lock);
                /* recheck with lock, in case it's unlinked by
                 * ptlrpc_at_check_timed() */
                if (likely(req->rq_at_linked))
                        ptlrpc_at_remove_timed(req);
                spin_unlock(&svcpt->scp_at_lock);
        }

        LASSERT(cfs_list_empty(&req->rq_timed_list));

        /* finalize request */
        if (req->rq_export) {
                class_export_put(req->rq_export);
                req->rq_export = NULL;
        }

        spin_lock(&svcpt->scp_lock);

        cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);

        refcount = --(rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle: add to history */
                cfs_list_del(&rqbd->rqbd_list);

                cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
                svcpt->scp_hist_nrqbds++;

                /* cull some history?
                 * I expect only about 1 or 2 rqbds need to be recycled here */
                while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
                        rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next,
                                              struct ptlrpc_request_buffer_desc,
                                              rqbd_list);

                        cfs_list_del(&rqbd->rqbd_list);
                        svcpt->scp_hist_nrqbds--;

                        /* remove rqbd's reqs from svc's req history while
                         * I've got the service lock */
                        cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
                                req = cfs_list_entry(tmp, struct ptlrpc_request,
                                                     rq_list);
                                /* Track the highest culled req seq */
                                if (req->rq_history_seq >
                                    svcpt->scp_hist_seq_culled) {
                                        svcpt->scp_hist_seq_culled =
                                                req->rq_history_seq;
                                }
                                cfs_list_del(&req->rq_history_list);
                        }

                        spin_unlock(&svcpt->scp_lock);

                        cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
                                req = cfs_list_entry(rqbd->rqbd_reqs.next,
                                                     struct ptlrpc_request,
                                                     rq_list);
                                cfs_list_del(&req->rq_list);
                                ptlrpc_server_free_request(req);
                        }

                        spin_lock(&svcpt->scp_lock);
                        /*
                         * now all reqs including the embedded req have been
                         * disposed; schedule the request buffer for re-use.
                         */
                        LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
                                0);
                        cfs_list_add_tail(&rqbd->rqbd_list,
                                          &svcpt->scp_rqbd_idle);
                }

                spin_unlock(&svcpt->scp_lock);
        } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
                /* If we are low on memory, we are not interested in history */
                cfs_list_del(&req->rq_list);
                cfs_list_del_init(&req->rq_history_list);

                /* Track the highest culled req seq */
                if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
                        svcpt->scp_hist_seq_culled = req->rq_history_seq;

                spin_unlock(&svcpt->scp_lock);

                ptlrpc_server_free_request(req);
        } else {
                spin_unlock(&svcpt->scp_lock);
        }
}

/** Change request export and move hp request from old export to new */
void ptlrpc_request_change_export(struct ptlrpc_request *req,
                                  struct obd_export *export)
{
        if (req->rq_export != NULL) {
                LASSERT(!list_empty(&req->rq_exp_list));
                /* remove rq_exp_list from last export */
                spin_lock_bh(&req->rq_export->exp_rpc_lock);
                list_del_init(&req->rq_exp_list);
                spin_unlock_bh(&req->rq_export->exp_rpc_lock);
                /* export has one reference already, so it's safe to
                 * add req to export queue here and get another
                 * reference for request later */
                spin_lock_bh(&export->exp_rpc_lock);
                if (req->rq_ops != NULL) /* hp request */
                        list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
                else
                        list_add(&req->rq_exp_list, &export->exp_reg_rpcs);
                spin_unlock_bh(&export->exp_rpc_lock);

                class_export_rpc_dec(req->rq_export);
                class_export_put(req->rq_export);
        }

        /* request takes one export refcount */
        req->rq_export = class_export_get(export);
        class_export_rpc_inc(export);

        return;
}

/**
 * to finish a request: stop sending more early replies, and release
 * the request.
 */
static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
                                         struct ptlrpc_request *req)
{
        ptlrpc_server_hpreq_fini(req);

        ptlrpc_server_drop_request(req);
}

/**
 * to finish an active request: stop sending more early replies, and release
 * the request. should be called after we have finished handling the request.
 */
static void ptlrpc_server_finish_active_request(
                                        struct ptlrpc_service_part *svcpt,
                                        struct ptlrpc_request *req)
{
        spin_lock(&svcpt->scp_req_lock);
        ptlrpc_nrs_req_stop_nolock(req);
        svcpt->scp_nreqs_active--;
        if (req->rq_hp)
                svcpt->scp_nhreqs_active--;
        spin_unlock(&svcpt->scp_req_lock);

        ptlrpc_nrs_req_finalize(req);

        if (req->rq_export != NULL)
                class_export_rpc_dec(req->rq_export);

        ptlrpc_server_finish_request(svcpt, req);
}

/**
 * This function makes sure dead exports are evicted in a timely manner.
 * It is only called when some export receives a message (i.e.,
 * the network is up.)
 */
static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
{
        struct obd_export *oldest_exp;
        time_t oldest_time, new_time;

        ENTRY;

        LASSERT(exp);

        /* Compensate for slow machines, etc, by faking our request time
           into the future.  Although this can break the strict time-ordering
           of the list, we can be really lazy here - we don't have to evict
           at the exact right moment.  Eventually, all silent exports
           will make it to the top of the list. */

        /* Do not pay attention to renewals of 1 second or less. */
        new_time = cfs_time_current_sec() + extra_delay;
        if (exp->exp_last_request_time + 1 /* second */ >= new_time)
                RETURN_EXIT;

        exp->exp_last_request_time = new_time;

        /* exports may get disconnected from the chain even though the
           export has references, so we must keep the spin lock while
           manipulating the lists */
        spin_lock(&exp->exp_obd->obd_dev_lock);

        if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
                /* this one is not timed */
                spin_unlock(&exp->exp_obd->obd_dev_lock);
                RETURN_EXIT;
        }

        cfs_list_move_tail(&exp->exp_obd_chain_timed,
                           &exp->exp_obd->obd_exports_timed);

        oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
                                    struct obd_export, exp_obd_chain_timed);
        oldest_time = oldest_exp->exp_last_request_time;
        spin_unlock(&exp->exp_obd->obd_dev_lock);

        if (exp->exp_obd->obd_recovering) {
                /* be nice to everyone during recovery */
                EXIT;
                return;
        }

        /* Note - racing to start/reset the obd_eviction timer is safe */
        if (exp->exp_obd->obd_eviction_timer == 0) {
                /* Check if the oldest entry is expired. */
                if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
                                              extra_delay)) {
                        /* We need a second timer, in case the net was down and
                         * it just came back. Since the pinger may skip every
                         * other PING_INTERVAL (see note in ptlrpc_pinger_main),
                         * we better wait for 3. */
                        exp->exp_obd->obd_eviction_timer =
                                cfs_time_current_sec() + 3 * PING_INTERVAL;
                        CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
                               exp->exp_obd->obd_name,
                               obd_export_nid2str(oldest_exp), oldest_time);
                }
        } else {
                if (cfs_time_current_sec() >
                    (exp->exp_obd->obd_eviction_timer + extra_delay)) {
                        /* The evictor won't evict anyone whom we've heard from
                         * recently, so we don't have to check before we start
                         * it. */
                        if (!ping_evictor_wake(exp))
                                exp->exp_obd->obd_eviction_timer = 0;
                }
        }

        EXIT;
}

/**
 * Sanity check request \a req.
 * Return 0 if all is ok, error code otherwise.
 */
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
        int rc = 0;

        if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
                     req->rq_export->exp_conn_cnt)) {
                DEBUG_REQ(D_RPCTRACE, req,
                          "DROPPING req from old connection %d < %d",
                          lustre_msg_get_conn_cnt(req->rq_reqmsg),
                          req->rq_export->exp_conn_cnt);
                return -EEXIST;
        }
        if (unlikely(req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failing over; don't handle any more reqs, send
                 * an error response instead. */
                CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
                       req, req->rq_export->exp_obd->obd_name);
                rc = -ENODEV;
        } else if (lustre_msg_get_flags(req->rq_reqmsg) &
                   (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
                   !(req->rq_export->exp_obd->obd_recovering)) {
                DEBUG_REQ(D_ERROR, req,
                          "Invalid replay without recovery");
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
                   !(req->rq_export->exp_obd->obd_recovering)) {
                DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
                          LPU64" without recovery",
                          lustre_msg_get_transno(req->rq_reqmsg));
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        }

        if (unlikely(rc < 0)) {
                req->rq_status = rc;
                ptlrpc_error(req);
        }
        return rc;
}

static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        __s32 next;

        if (array->paa_count == 0) {
                cfs_timer_disarm(&svcpt->scp_at_timer);
                return;
        }

        /* Set timer for closest deadline */
        next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
                       at_early_margin);
        if (next <= 0) {
                ptlrpc_at_timer((unsigned long)svcpt);
        } else {
                cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
                       svcpt->scp_service->srv_name, next);
        }
}

/* Add rpc to early reply check list */
static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        struct ptlrpc_request *rq = NULL;
        __u32 index;

        if (AT_OFF)
                return 0;

        if (req->rq_no_reply)
                return 0;

        if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
                return -ENOSYS;

        spin_lock(&svcpt->scp_at_lock);
        LASSERT(cfs_list_empty(&req->rq_timed_list));

        index = (unsigned long)req->rq_deadline % array->paa_size;
        if (array->paa_reqs_count[index] > 0) {
                /* latest rpcs will have the latest deadlines in the list,
                 * so search backward. */
                cfs_list_for_each_entry_reverse(rq,
                                                &array->paa_reqs_array[index],
                                                rq_timed_list) {
                        if (req->rq_deadline >= rq->rq_deadline) {
                                cfs_list_add(&req->rq_timed_list,
                                             &rq->rq_timed_list);
                                break;
                        }
                }
        }

        /* Add the request at the head of the list */
        if (cfs_list_empty(&req->rq_timed_list))
                cfs_list_add(&req->rq_timed_list,
                             &array->paa_reqs_array[index]);

        spin_lock(&req->rq_lock);
        req->rq_at_linked = 1;
        spin_unlock(&req->rq_lock);
        req->rq_at_index = index;
        array->paa_reqs_count[index]++;
        array->paa_count++;
        if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
                array->paa_deadline = req->rq_deadline;
                ptlrpc_at_set_timer(svcpt);
        }
        spin_unlock(&svcpt->scp_at_lock);

        return 0;
}
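
/*
 * Illustration of the bucketing above (hypothetical numbers): with
 * paa_size == 600, a request with rq_deadline == 1000000123 hashes to
 * index 1000000123 % 600 == 523.  Requests whose deadlines differ by a
 * multiple of paa_size share a bucket, which is why each bucket is kept
 * sorted by deadline and paa_reqs_count[] tracks its population.
 */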

static void
ptlrpc_at_remove_timed(struct ptlrpc_request *req)
{
        struct ptlrpc_at_array *array;

        array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;

        /* NB: must be called with svcpt::scp_at_lock held */
        LASSERT(!cfs_list_empty(&req->rq_timed_list));
        cfs_list_del_init(&req->rq_timed_list);

        spin_lock(&req->rq_lock);
        req->rq_at_linked = 0;
        spin_unlock(&req->rq_lock);

        array->paa_reqs_count[req->rq_at_index]--;
        array->paa_count--;
}
1280
1281 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
1282 {
1283         struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
1284         struct ptlrpc_request *reqcopy;
1285         struct lustre_msg *reqmsg;
1286         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
1287         time_t newdl;
1288         int rc;
1289         ENTRY;
1290
1291         /* deadline is when the client expects us to reply, margin is the
1292            difference between clients' and servers' expectations */
1293         DEBUG_REQ(D_ADAPTTO, req,
1294                   "%ssending early reply (deadline %+lds, margin %+lds) for "
1295                   "%d+%d", AT_OFF ? "AT off - not " : "",
1296                   olddl, olddl - at_get(&svcpt->scp_at_estimate),
1297                   at_get(&svcpt->scp_at_estimate), at_extra);
1298
1299         if (AT_OFF)
1300                 RETURN(0);
1301
1302         if (olddl < 0) {
1303                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
1304                           "not sending early reply. Consider increasing "
1305                           "at_early_margin (%d)?", olddl, at_early_margin);
1306
1307                 /* Return an error so we're not re-added to the timed list. */
1308                 RETURN(-ETIMEDOUT);
1309         }
1310
1311         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
1312                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
1313                           "but no AT support");
1314                 RETURN(-ENOSYS);
1315         }
1316
1317         if (req->rq_export &&
1318             lustre_msg_get_flags(req->rq_reqmsg) &
1319             (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
1320                 /* During recovery, we don't want to send too many early
1321                  * replies, but on the other hand we want to make sure the
1322                  * client has enough time to resend if the rpc is lost. So
1323                  * during the recovery period send at least 4 early replies,
1324                  * spacing them every at_extra if we can. at_estimate should
1325                  * always equal this fixed value during recovery. */
1326                 at_measured(&svcpt->scp_at_estimate, min(at_extra,
1327                             req->rq_export->exp_obd->obd_recovery_timeout / 4));
1328         } else {
1329                 /* Fake our processing time into the future to ask the clients
1330                  * for some extra amount of time */
1331                 at_measured(&svcpt->scp_at_estimate, at_extra +
1332                             cfs_time_current_sec() -
1333                             req->rq_arrival_time.tv_sec);
1334
1335                 /* Check to see if we've actually increased the deadline -
1336                  * we may be past adaptive_max */
1337                 if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
1338                     at_get(&svcpt->scp_at_estimate)) {
1339                         DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
1340                                   "(%ld/%ld), not sending early reply\n",
1341                                   olddl, req->rq_arrival_time.tv_sec +
1342                                   at_get(&svcpt->scp_at_estimate) -
1343                                   cfs_time_current_sec());
1344                         RETURN(-ETIMEDOUT);
1345                 }
1346         }
1347         newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
1348
1349         reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
1350         if (reqcopy == NULL)
1351                 RETURN(-ENOMEM);
1352         OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
1353         if (!reqmsg)
1354                 GOTO(out_free, rc = -ENOMEM);
1355
1356         *reqcopy = *req;
1357         reqcopy->rq_reply_state = NULL;
1358         reqcopy->rq_rep_swab_mask = 0;
1359         reqcopy->rq_pack_bulk = 0;
1360         reqcopy->rq_pack_udesc = 0;
1361         reqcopy->rq_packed_final = 0;
1362         sptlrpc_svc_ctx_addref(reqcopy);
1363         /* We only need the reqmsg for the magic */
1364         reqcopy->rq_reqmsg = reqmsg;
1365         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
1366
1367         LASSERT(cfs_atomic_read(&req->rq_refcount));
1368         /** if it is last refcount then early reply isn't needed */
1369         if (cfs_atomic_read(&req->rq_refcount) == 1) {
1370                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
1371                           "abort sending early reply\n");
1372                 GOTO(out, rc = -EINVAL);
1373         }
1374
1375         /* Connection ref */
1376         reqcopy->rq_export = class_conn2export(
1377                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
1378         if (reqcopy->rq_export == NULL)
1379                 GOTO(out, rc = -ENODEV);
1380
1381         /* RPC ref */
1382         class_export_rpc_inc(reqcopy->rq_export);
1383         if (reqcopy->rq_export->exp_obd &&
1384             reqcopy->rq_export->exp_obd->obd_fail)
1385                 GOTO(out_put, rc = -ENODEV);
1386
1387         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1388         if (rc)
1389                 GOTO(out_put, rc);
1390
1391         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1392
1393         if (!rc) {
1394                 /* Adjust our own deadline to what we told the client */
1395                 req->rq_deadline = newdl;
1396                 req->rq_early_count++; /* number sent, server side */
1397         } else {
1398                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1399         }
1400
1401         /* Free the (early) reply state from lustre_pack_reply.
1402            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1403         ptlrpc_req_drop_rs(reqcopy);
1404
1405 out_put:
1406         class_export_rpc_dec(reqcopy->rq_export);
1407         class_export_put(reqcopy->rq_export);
1408 out:
1409         sptlrpc_svc_ctx_decref(reqcopy);
1410         OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
1411 out_free:
1412         ptlrpc_request_cache_free(reqcopy);
1413         RETURN(rc);
1414 }
1415
1416 /* Send early replies to everybody expiring within at_early_margin,
1417    asking for at_extra time */
1418 static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
1419 {
1420         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1421         struct ptlrpc_request *rq, *n;
1422         cfs_list_t work_list;
1423         __u32  index, count;
1424         time_t deadline;
1425         time_t now = cfs_time_current_sec();
1426         cfs_duration_t delay;
1427         int first, counter = 0;
1428         ENTRY;
1429
1430         spin_lock(&svcpt->scp_at_lock);
1431         if (svcpt->scp_at_check == 0) {
1432                 spin_unlock(&svcpt->scp_at_lock);
1433                 RETURN(0);
1434         }
1435         delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
1436         svcpt->scp_at_check = 0;
1437
1438         if (array->paa_count == 0) {
1439                 spin_unlock(&svcpt->scp_at_lock);
1440                 RETURN(0);
1441         }
1442
1443         /* The timer went off, but maybe the nearest rpc already completed. */
1444         first = array->paa_deadline - now;
1445         if (first > at_early_margin) {
1446                 /* We've still got plenty of time.  Reset the timer. */
1447                 ptlrpc_at_set_timer(svcpt);
1448                 spin_unlock(&svcpt->scp_at_lock);
1449                 RETURN(0);
1450         }
1451
1452         /* We're close to a timeout, and we don't know how much longer the
1453            server will take. Send early replies to everyone expiring soon. */
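        /* paa_reqs_array is a ring of lists keyed by deadline second, so
         * scanning forward from paa_deadline % paa_size visits requests in
         * roughly increasing deadline order.  For example, assuming
         * paa_size were 300, a request due at second 1000 would live in
         * bucket 1000 % 300 = 100. */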
1454         CFS_INIT_LIST_HEAD(&work_list);
1455         deadline = -1;
1456         index = (unsigned long)array->paa_deadline % array->paa_size;
1457         count = array->paa_count;
1458         while (count > 0) {
1459                 count -= array->paa_reqs_count[index];
1460                 cfs_list_for_each_entry_safe(rq, n,
1461                                              &array->paa_reqs_array[index],
1462                                              rq_timed_list) {
1463                         if (rq->rq_deadline > now + at_early_margin) {
1464                                 /* update the earliest deadline */
1465                                 if (deadline == -1 ||
1466                                     rq->rq_deadline < deadline)
1467                                         deadline = rq->rq_deadline;
1468                                 break;
1469                         }
1470
1471                         ptlrpc_at_remove_timed(rq);
1472                         /**
1473                          * ptlrpc_server_drop_request() may have already
1474                          * dropped the refcount to 0.  Check for this and
1475                          * don't add such an entry to work_list
1476                          */
1477                         if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
1478                                 cfs_list_add(&rq->rq_timed_list, &work_list);
1479                         counter++;
1480                 }
1481
1482                 if (++index >= array->paa_size)
1483                         index = 0;
1484         }
1485         array->paa_deadline = deadline;
1486         /* we have a new earliest deadline, restart the timer */
1487         ptlrpc_at_set_timer(svcpt);
1488
1489         spin_unlock(&svcpt->scp_at_lock);
1490
1491         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1492                "replies\n", first, at_extra, counter);
1493         if (first < 0) {
1494                 /* We're already past request deadlines before we even get a
1495                    chance to send early replies */
1496                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1497                               "request traffic (cpu-bound).\n",
1498                               svcpt->scp_service->srv_name);
1499                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1500                       "delay="CFS_DURATION_T"(jiff)\n",
1501                       counter, svcpt->scp_nreqs_incoming,
1502                       svcpt->scp_nreqs_active,
1503                       at_get(&svcpt->scp_at_estimate), delay);
1504         }
1505
1506         /* we took an additional refcount so entries can't be deleted from
1507          * the list; no locking is needed */
1508         while (!cfs_list_empty(&work_list)) {
1509                 rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
1510                                     rq_timed_list);
1511                 cfs_list_del_init(&rq->rq_timed_list);
1512
1513                 if (ptlrpc_at_send_early_reply(rq) == 0)
1514                         ptlrpc_at_add_timed(rq);
1515
1516                 ptlrpc_server_drop_request(rq);
1517         }
1518
1519         RETURN(1); /* return "did_something" for liblustre */
1520 }
1521
1522 /* Check if we are already handling an earlier incarnation of this request.
1523  * Called with &req->rq_export->exp_rpc_lock held */
1524 static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
1525 {
1526         struct ptlrpc_request   *tmp = NULL;
1527
1528         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
1529             (cfs_atomic_read(&req->rq_export->exp_rpc_count) == 0))
1530                 return 0;
1531
1532         /* bulk requests are aborted upon reconnect; don't try to
1533          * find a match */
1534         if (req->rq_bulk_write || req->rq_bulk_read)
1535                 return 0;
1536
1537         /* This list should not be longer than max_requests in
1538          * flight on the client, so it is not all that long.
1539          * Also we only hit this codepath for resent requests,
1540          * which makes it even more rarely hit */
1541         cfs_list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
1542                                 rq_exp_list) {
1543                 /* Found duplicate one */
1544                 if (tmp->rq_xid == req->rq_xid)
1545                         goto found;
1546         }
1547         cfs_list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
1548                                 rq_exp_list) {
1549                 /* Found duplicate one */
1550                 if (tmp->rq_xid == req->rq_xid)
1551                         goto found;
1552         }
1553         return 0;
1554
1555 found:
1556         DEBUG_REQ(D_HA, req, "Found duplicate req in processing\n");
1557         DEBUG_REQ(D_HA, tmp, "Request being processed\n");
1558         return -EBUSY;
1559 }
1560
1561 /**
1562  * Put the request to the export list if the request may become
1563  * a high priority one.
1564  */
1565 static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
1566                                     struct ptlrpc_request *req)
1567 {
1568         cfs_list_t      *list;
1569         int              rc, hp = 0;
1570
1571         ENTRY;
1572
1573         if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
1574                 rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
1575                 if (rc < 0)
1576                         RETURN(rc);
1577                 LASSERT(rc == 0);
1578         }
1579         if (req->rq_export) {
1580                 if (req->rq_ops) {
1581                         /* Perform a request-specific check.  We should do
1582                          * this before the request is added to the exp_hp_rpcs
1583                          * list, otherwise it may hit the swab race of LU-1044. */
1584                         if (req->rq_ops->hpreq_check) {
1585                                 rc = req->rq_ops->hpreq_check(req);
1586                                 /**
1587                                  * XXX: Out of all current
1588                                  * ptlrpc_hpreq_ops::hpreq_check(), only
1589                                  * ldlm_cancel_hpreq_check() can return an
1590                                  * error code; other functions assert in
1591                                  * similar places, which seems odd.
1592                                  * What also does not seem right is that
1593                                  * handlers for those RPCs do not assert
1594                                  * on the same checks, but rather handle the
1595                                  * error cases. e.g. see ost_rw_hpreq_check(),
1596                                  * and ost_brw_read(), ost_brw_write().
1597                                  */
1598                                 if (rc < 0)
1599                                         RETURN(rc);
1600                                 LASSERT(rc == 0 || rc == 1);
1601                                 hp = rc;
1602                         }
1603                         list = &req->rq_export->exp_hp_rpcs;
1604                 } else {
1605                         list = &req->rq_export->exp_reg_rpcs;
1606                 }
1607
1608                 /* search for a duplicated xid and add to the list
1609                  * atomically */
1610                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1611                 rc = ptlrpc_server_check_resend_in_progress(req);
1612                 if (rc < 0) {
1613                         spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1614                         RETURN(rc);
1615                 }
1616                 cfs_list_add(&req->rq_exp_list, list);
1617                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1618         }
1619
1620         ptlrpc_nrs_req_initialize(svcpt, req, !!hp);
1621
1622         RETURN(hp);
1623 }
1624
1625 /** Remove the request from the export list. */
1626 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
1627 {
1628         ENTRY;
1629         if (req->rq_export) {
1630                 /* refresh the lock timeout again so that the client has
1631                  * more room to send a lock cancel RPC. */
1632                 if (req->rq_ops && req->rq_ops->hpreq_fini)
1633                         req->rq_ops->hpreq_fini(req);
1634
1635                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1636                 cfs_list_del_init(&req->rq_exp_list);
1637                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1638         }
1639         EXIT;
1640 }
1641
1642 static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
1643 {
1644         return 1;
1645 }
1646
1647 static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = {
1648         .hpreq_check       = ptlrpc_hpreq_check,
1649 };
1650
1651 /* Hi-Priority RPC check by RPC operation code. */
1652 int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
1653 {
1654         int opc = lustre_msg_get_opc(req->rq_reqmsg);
1655
1656         /* Check for an export so that only reconnects for a not yet
1657          * evicted export can become HP rpcs. */
1658         if ((req->rq_export != NULL) &&
1659             (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT))
1660                 req->rq_ops = &ptlrpc_hpreq_common;
1661
1662         return 0;
1663 }
1664 EXPORT_SYMBOL(ptlrpc_hpreq_handler);
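/* A usage sketch (an assumption based on how so_hpreq_handler is wired up
 * elsewhere in the tree, not shown in this file): a service opts in by
 * setting srv_ops.so_hpreq_handler = ptlrpc_hpreq_handler in its service
 * definition; ptlrpc_server_hpreq_init() then invokes it for every
 * incoming request to tag pings and reconnects as high priority. */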
1665
1666 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
1667                                      struct ptlrpc_request *req)
1668 {
1669         int     rc;
1670         ENTRY;
1671
1672         rc = ptlrpc_server_hpreq_init(svcpt, req);
1673         if (rc < 0)
1674                 RETURN(rc);
1675
1676         /* the current thread is no longer the processing thread for this
1677          * request, but the request is in exp_hp_list and can be found there.
1678          * Remove all relations between the request and the old thread. */
1679         req->rq_svc_thread->t_env->le_ses = NULL;
1680         req->rq_svc_thread = NULL;
1681         req->rq_session.lc_thread = NULL;
1682
1683         ptlrpc_nrs_req_add(svcpt, req, !!rc);
1684
1685         RETURN(0);
1686 }
1687
1688 /**
1689  * Whether handling a high priority request is allowed.
1690  * Callers may invoke this without any lock, but must hold
1691  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1692  */
1693 static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
1694                                      bool force)
1695 {
1696         int running = svcpt->scp_nthrs_running;
1697
1698         if (!nrs_svcpt_has_hp(svcpt))
1699                 return false;
1700
1701         if (force)
1702                 return true;
1703
1704         if (ptlrpc_nrs_req_throttling_nolock(svcpt, true))
1705                 return false;
1706
1707         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1708                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1709                 /* leave just 1 thread for normal RPCs */
1710                 running = PTLRPC_NTHRS_INIT;
1711                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1712                         running += 1;
1713         }
1714
1715         if (svcpt->scp_nreqs_active >= running - 1)
1716                 return false;
1717
1718         if (svcpt->scp_nhreqs_active == 0)
1719                 return true;
1720
1721         return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
1722                svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
1723 }
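/* Illustration (srv_hpreq_ratio value assumed): with a ratio of 10, a
 * partition serves up to 10 consecutive HP requests while normal ones are
 * pending; the next dispatch then falls through to the normal queue, and
 * ptlrpc_server_request_get() resets scp_hreq_count.  This bounds how long
 * normal RPCs can be starved by a stream of HP RPCs. */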
1724
1725 static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
1726                                        bool force)
1727 {
1728         return ptlrpc_server_allow_high(svcpt, force) &&
1729                ptlrpc_nrs_req_pending_nolock(svcpt, true);
1730 }
1731
1732 /**
1733  * Only allow normal priority requests on a service that has a high-priority
1734  * queue if forced (i.e. cleanup), if there are other high priority requests
1735  * already being processed (i.e. those threads can service more high-priority
1736  * requests), or if there are enough idle threads that a later thread can do
1737  * a high priority request.
1738  * Callers may invoke this without any lock, but must hold
1739  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1740  */
1741 static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
1742                                        bool force)
1743 {
1744         int running = svcpt->scp_nthrs_running;
1745 #ifndef __KERNEL__
1746         if (1) /* always allow to handle normal request for liblustre */
1747                 return true;
1748 #endif
1749         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1750                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1751                 /* leave just 1 thread for normal RPCs */
1752                 running = PTLRPC_NTHRS_INIT;
1753                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1754                         running += 1;
1755         }
1756
1757         if (force)
1758                 return true;
1759
1760         if (ptlrpc_nrs_req_throttling_nolock(svcpt, false))
1761                 return false;
1762
1763         if (svcpt->scp_nreqs_active < running - 2)
1764                 return true;
1765
1766         if (svcpt->scp_nreqs_active >= running - 1)
1767                 return false;
1768
1769         return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
1770 }
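/* The running - 2 / running - 1 thresholds above always keep at least one
 * thread free for newly incoming requests (plus one more step of headroom
 * for high-priority work); this pairs with the "2 threads min" requirement
 * asserted in ptlrpc_start_threads(). */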
1771
1772 static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
1773                                          bool force)
1774 {
1775         return ptlrpc_server_allow_normal(svcpt, force) &&
1776                ptlrpc_nrs_req_pending_nolock(svcpt, false);
1777 }
1778
1779 /**
1780  * Returns true if there are requests available in incoming
1781  * request queue for processing and it is allowed to fetch them.
1782  * Callers may invoke this without any lock, but must hold
1783  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1784  * \see ptlrpc_server_allow_normal
1785  * \see ptlrpc_server_allow_high
1786  */
1787 static inline bool
1788 ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
1789 {
1790         return ptlrpc_server_high_pending(svcpt, force) ||
1791                ptlrpc_server_normal_pending(svcpt, force);
1792 }
1793
1794 /**
1795  * Fetch a request for processing from queue of unprocessed requests.
1796  * Favors high-priority requests.
1797  * Returns a pointer to fetched request.
1798  */
1799 static struct ptlrpc_request *
1800 ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
1801 {
1802         struct ptlrpc_request *req = NULL;
1803         ENTRY;
1804
1805         spin_lock(&svcpt->scp_req_lock);
1806 #ifndef __KERNEL__
1807         /* !@%$# liblustre only has 1 thread */
1808         if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
1809                 spin_unlock(&svcpt->scp_req_lock);
1810                 RETURN(NULL);
1811         }
1812 #endif
1813
1814         if (ptlrpc_server_high_pending(svcpt, force)) {
1815                 req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
1816                 if (req != NULL) {
1817                         svcpt->scp_hreq_count++;
1818                         goto got_request;
1819                 }
1820         }
1821
1822         if (ptlrpc_server_normal_pending(svcpt, force)) {
1823                 req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
1824                 if (req != NULL) {
1825                         svcpt->scp_hreq_count = 0;
1826                         goto got_request;
1827                 }
1828         }
1829
1830         spin_unlock(&svcpt->scp_req_lock);
1831         RETURN(NULL);
1832
1833 got_request:
1834         svcpt->scp_nreqs_active++;
1835         if (req->rq_hp)
1836                 svcpt->scp_nhreqs_active++;
1837
1838         spin_unlock(&svcpt->scp_req_lock);
1839
1840         if (likely(req->rq_export))
1841                 class_export_rpc_inc(req->rq_export);
1842
1843         RETURN(req);
1844 }
1845
1846 /**
1847  * Handle freshly incoming reqs, add to timed early reply list,
1848  * pass on to regular request queue.
1849  * All incoming requests pass through here before getting into
1850  * ptlrpc_server_handle_req later on.
1851  */
1852 static int
1853 ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
1854                             struct ptlrpc_thread *thread)
1855 {
1856         struct ptlrpc_service   *svc = svcpt->scp_service;
1857         struct ptlrpc_request   *req;
1858         __u32                   deadline;
1859         int                     rc;
1860         ENTRY;
1861
1862         spin_lock(&svcpt->scp_lock);
1863         if (cfs_list_empty(&svcpt->scp_req_incoming)) {
1864                 spin_unlock(&svcpt->scp_lock);
1865                 RETURN(0);
1866         }
1867
1868         req = cfs_list_entry(svcpt->scp_req_incoming.next,
1869                              struct ptlrpc_request, rq_list);
1870         cfs_list_del_init(&req->rq_list);
1871         svcpt->scp_nreqs_incoming--;
1872         /* Consider this still a "queued" request as far as stats are
1873          * concerned */
1874         spin_unlock(&svcpt->scp_lock);
1875
1876         /* go through security check/transform */
1877         rc = sptlrpc_svc_unwrap_request(req);
1878         switch (rc) {
1879         case SECSVC_OK:
1880                 break;
1881         case SECSVC_COMPLETE:
1882                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1883                 goto err_req;
1884         case SECSVC_DROP:
1885                 goto err_req;
1886         default:
1887                 LBUG();
1888         }
1889
1890         /*
1891          * for null-flavored rpc, the msg has already been unpacked by
1892          * sptlrpc, although redoing it wouldn't be harmful.
1893          */
1894         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1895                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1896                 if (rc != 0) {
1897                         CERROR("error unpacking request: ptl %d from %s "
1898                                "x"LPU64"\n", svc->srv_req_portal,
1899                                libcfs_id2str(req->rq_peer), req->rq_xid);
1900                         goto err_req;
1901                 }
1902         }
1903
1904         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1905         if (rc) {
1906                 CERROR("error unpacking ptlrpc body: ptl %d from %s x"
1907                         LPU64"\n", svc->srv_req_portal,
1908                         libcfs_id2str(req->rq_peer), req->rq_xid);
1909                 goto err_req;
1910         }
1911
1912         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1913             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
1914                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1915                        cfs_fail_val, req->rq_xid);
1916                 goto err_req;
1917         }
1918
1919         rc = -EINVAL;
1920         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1921                 CERROR("wrong packet type received (type=%u) from %s\n",
1922                        lustre_msg_get_type(req->rq_reqmsg),
1923                        libcfs_id2str(req->rq_peer));
1924                 goto err_req;
1925         }
1926
1927         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1928         case MDS_WRITEPAGE:
1929         case OST_WRITE:
1930                 req->rq_bulk_write = 1;
1931                 break;
1932         case MDS_READPAGE:
1933         case OST_READ:
1934         case MGS_CONFIG_READ:
1935                 req->rq_bulk_read = 1;
1936                 break;
1937         }
1938
1939         CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
1940
1941         req->rq_export = class_conn2export(
1942                 lustre_msg_get_handle(req->rq_reqmsg));
1943         if (req->rq_export) {
1944                 rc = ptlrpc_check_req(req);
1945                 if (rc == 0) {
1946                         rc = sptlrpc_target_export_check(req->rq_export, req);
1947                         if (rc)
1948                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1949                                           "illegal security flavor,");
1950                 }
1951
1952                 if (rc)
1953                         goto err_req;
1954                 ptlrpc_update_export_timer(req->rq_export, 0);
1955         }
1956
1957         /* req_in handling should/must be fast */
1958         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1959                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1960                           cfs_time_sub(cfs_time_current_sec(),
1961                                        req->rq_arrival_time.tv_sec));
1962
1963         /* Set rpc server deadline and add it to the timed list */
1964         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1965                     MSGHDR_AT_SUPPORT) ?
1966                    /* The max time the client expects us to take */
1967                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1968         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1969         if (unlikely(deadline == 0)) {
1970                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1971                 goto err_req;
1972         }
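        /* A worked example (numbers assumed): a client that packed a 100s
         * timeout for a request arriving at t = 5000s yields
         * rq_deadline = 5100s; early replies may later push this deadline
         * out, while a packed timeout of 0 is rejected just above. */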
1973
1974         req->rq_svc_thread = thread;
1975         if (thread != NULL) {
1976                 /* initialize request session, it is needed for request
1977                  * processing by target */
1978                 rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
1979                                                        LCT_NOREF);
1980                 if (rc) {
1981                         CERROR("%s: failure to initialize session: rc = %d\n",
1982                                thread->t_name, rc);
1983                         goto err_req;
1984                 }
1985                 req->rq_session.lc_thread = thread;
1986                 lu_context_enter(&req->rq_session);
1987                 thread->t_env->le_ses = &req->rq_session;
1988         }
1989
1990         ptlrpc_at_add_timed(req);
1991
1992         /* Move it over to the request processing queue */
1993         rc = ptlrpc_server_request_add(svcpt, req);
1994         if (rc)
1995                 GOTO(err_req, rc);
1996
1997         wake_up(&svcpt->scp_waitq);
1998         RETURN(1);
1999
2000 err_req:
2001         ptlrpc_server_finish_request(svcpt, req);
2002
2003         RETURN(1);
2004 }
2005
2006 /**
2007  * Main incoming request handling logic.
2008  * Calls handler function from service to do actual processing.
2009  */
2010 static int
2011 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
2012                              struct ptlrpc_thread *thread)
2013 {
2014         struct ptlrpc_service   *svc = svcpt->scp_service;
2015         struct ptlrpc_request   *request;
2016         struct timeval           work_start;
2017         struct timeval           work_end;
2018         long                     timediff;
2019         int                      fail_opc = 0;
2020
2021         ENTRY;
2022
2023         request = ptlrpc_server_request_get(svcpt, false);
2024         if (request == NULL)
2025                 RETURN(0);
2026
2027         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
2028                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
2029         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
2030                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
2031
2032         if (unlikely(fail_opc)) {
2033                 if (request->rq_export && request->rq_ops)
2034                         OBD_FAIL_TIMEOUT(fail_opc, 4);
2035         }
2036
2037         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
2038
2039         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
2040                 libcfs_debug_dumplog();
2041
2042         do_gettimeofday(&work_start);
2043         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
2044         if (likely(svc->srv_stats != NULL)) {
2045                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
2046                                     timediff);
2047                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
2048                                     svcpt->scp_nreqs_incoming);
2049                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
2050                                     svcpt->scp_nreqs_active);
2051                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
2052                                     at_get(&svcpt->scp_at_estimate));
2053         }
2054
2055         if (likely(request->rq_export)) {
2056                 if (unlikely(ptlrpc_check_req(request)))
2057                         goto put_conn;
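                /* timediff is in microseconds; the >> 19 below is a cheap
                 * approximation of dividing by 500000, i.e. it passes the
                 * queue wait to the export timer in roughly half-second
                 * units */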
2058                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
2059         }
2060
2061         /* Discard requests queued for longer than the deadline.
2062            The deadline is increased if we send an early reply. */
2063         if (cfs_time_current_sec() > request->rq_deadline) {
2064                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
2065                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
2066                           libcfs_id2str(request->rq_peer),
2067                           cfs_time_sub(request->rq_deadline,
2068                           request->rq_arrival_time.tv_sec),
2069                           cfs_time_sub(cfs_time_current_sec(),
2070                           request->rq_deadline));
2071                 goto put_conn;
2072         }
2073
2074         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
2075                "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
2076                (request->rq_export ?
2077                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2078                (request->rq_export ?
2079                 cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
2080                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
2081                libcfs_id2str(request->rq_peer),
2082                lustre_msg_get_opc(request->rq_reqmsg));
2083
2084         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
2085                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
2086
2087         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
2088
2089         /* re-assign the request and session thread to the current one */
2090         request->rq_svc_thread = thread;
2091         if (thread != NULL) {
2092                 LASSERT(request->rq_session.lc_thread == NULL);
2093                 request->rq_session.lc_thread = thread;
2094                 thread->t_env->le_ses = &request->rq_session;
2095         }
2096         svc->srv_ops.so_req_handler(request);
2097
2098         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
2099
2100 put_conn:
2101         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
2102                 DEBUG_REQ(D_WARNING, request, "Request took longer "
2103                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
2104                           " client may timeout.",
2105                           cfs_time_sub(request->rq_deadline,
2106                                        request->rq_arrival_time.tv_sec),
2107                           cfs_time_sub(cfs_time_current_sec(),
2108                                        request->rq_deadline));
2109         }
2110
2111         do_gettimeofday(&work_end);
2112         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
2113         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
2114                "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
2115                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
2116                 current_comm(),
2117                 (request->rq_export ?
2118                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2119                 (request->rq_export ?
2120                  cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
2121                 lustre_msg_get_status(request->rq_reqmsg),
2122                 request->rq_xid,
2123                 libcfs_id2str(request->rq_peer),
2124                 lustre_msg_get_opc(request->rq_reqmsg),
2125                 timediff,
2126                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
2127                 (request->rq_repmsg ?
2128                  lustre_msg_get_transno(request->rq_repmsg) :
2129                  request->rq_transno),
2130                 request->rq_status,
2131                 (request->rq_repmsg ?
2132                  lustre_msg_get_status(request->rq_repmsg) : -999));
2133         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
2134                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
2135                 int opc = opcode_offset(op);
2136                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
2137                         LASSERT(opc < LUSTRE_MAX_OPCODES);
2138                         lprocfs_counter_add(svc->srv_stats,
2139                                             opc + EXTRA_MAX_OPCODES,
2140                                             timediff);
2141                 }
2142         }
2143         if (unlikely(request->rq_early_count)) {
2144                 DEBUG_REQ(D_ADAPTTO, request,
2145                           "sent %d early replies before finishing in "
2146                           CFS_DURATION_T"s",
2147                           request->rq_early_count,
2148                           cfs_time_sub(work_end.tv_sec,
2149                           request->rq_arrival_time.tv_sec));
2150         }
2151
2152         ptlrpc_server_finish_active_request(svcpt, request);
2153
2154         RETURN(1);
2155 }
2156
2157 /**
2158  * An internal function to process a single reply state object.
2159  */
2160 static int
2161 ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
2162 {
2163         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
2164         struct ptlrpc_service     *svc = svcpt->scp_service;
2165         struct obd_export         *exp;
2166         int                        nlocks;
2167         int                        been_handled;
2168         ENTRY;
2169
2170         exp = rs->rs_export;
2171
2172         LASSERT (rs->rs_difficult);
2173         LASSERT (rs->rs_scheduled);
2174         LASSERT (cfs_list_empty(&rs->rs_list));
2175
2176         spin_lock(&exp->exp_lock);
2177         /* Noop if removed already */
2178         cfs_list_del_init (&rs->rs_exp_list);
2179         spin_unlock(&exp->exp_lock);
2180
2181         /* The disk commit callback holds exp_uncommitted_replies_lock while it
2182          * iterates over newly committed replies, removing them from
2183          * exp_uncommitted_replies.  It then drops this lock and schedules the
2184          * replies it found for handling here.
2185          *
2186          * We can avoid contention for exp_uncommitted_replies_lock between the
2187          * HRT threads and further commit callbacks by checking rs_committed
2188          * which is set in the commit callback while it holds both
2189          * rs_lock and exp_uncommitted_replies_lock.
2190          *
2191          * If we see rs_committed clear, the commit callback _may_ not have
2192          * handled this reply yet and we race with it to grab
2193          * exp_uncommitted_replies_lock before removing the reply from
2194          * exp_uncommitted_replies.  Note that if we lose the race and the
2195          * reply has already been removed, list_del_init() is a noop.
2196          *
2197          * If we see rs_committed set, we know the commit callback is handling,
2198          * or has handled, this reply, since store reordering might allow us to
2199          * see rs_committed set out of sequence.  But since this is done
2200          * holding rs_lock, we can be sure it has all completed once we hold
2201          * rs_lock, which we do right next.
2202          */
2203         if (!rs->rs_committed) {
2204                 spin_lock(&exp->exp_uncommitted_replies_lock);
2205                 cfs_list_del_init(&rs->rs_obd_list);
2206                 spin_unlock(&exp->exp_uncommitted_replies_lock);
2207         }
2208
2209         spin_lock(&rs->rs_lock);
2210
2211         been_handled = rs->rs_handled;
2212         rs->rs_handled = 1;
2213
2214         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
2215         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
2216
2217         if (nlocks == 0 && !been_handled) {
2218                 /* If we see this, we should already have seen the warning
2219                  * in mds_steal_ack_locks()  */
2220                 CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
2221                        " o%d NID %s\n",
2222                        rs,
2223                        rs->rs_xid, rs->rs_transno, rs->rs_opc,
2224                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
2225         }
2226
2227         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
2228                 spin_unlock(&rs->rs_lock);
2229
2230                 if (!been_handled && rs->rs_on_net) {
2231                         LNetMDUnlink(rs->rs_md_h);
2232                         /* Ignore return code; we're racing with completion */
2233                 }
2234
2235                 while (nlocks-- > 0)
2236                         ldlm_lock_decref(&rs->rs_locks[nlocks],
2237                                          rs->rs_modes[nlocks]);
2238
2239                 spin_lock(&rs->rs_lock);
2240         }
2241
2242         rs->rs_scheduled = 0;
2243
2244         if (!rs->rs_on_net) {
2245                 /* Off the net */
2246                 spin_unlock(&rs->rs_lock);
2247
2248                 class_export_put (exp);
2249                 rs->rs_export = NULL;
2250                 ptlrpc_rs_decref (rs);
2251                 if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
2252                     svc->srv_is_stopping)
2253                         wake_up_all(&svcpt->scp_waitq);
2254                 RETURN(1);
2255         }
2256
2257         /* still on the net; callback will schedule */
2258         spin_unlock(&rs->rs_lock);
2259         RETURN(1);
2260 }
2261
2262 #ifndef __KERNEL__
2263
2264 /**
2265  * Check whether given service has a reply available for processing
2266  * and process it.
2267  *
2268  * \param svc a ptlrpc service
2269  * \retval 0 no replies processed
2270  * \retval 1 one reply processed
2271  */
2272 static int
2273 ptlrpc_server_handle_reply(struct ptlrpc_service_part *svcpt)
2274 {
2275         struct ptlrpc_reply_state *rs = NULL;
2276         ENTRY;
2277
2278         spin_lock(&svcpt->scp_rep_lock);
2279         if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
2280                 rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
2281                                     struct ptlrpc_reply_state,
2282                                     rs_list);
2283                 cfs_list_del_init(&rs->rs_list);
2284         }
2285         spin_unlock(&svcpt->scp_rep_lock);
2286         if (rs != NULL)
2287                 ptlrpc_handle_rs(rs);
2288         RETURN(rs != NULL);
2289 }
2290
2291 /* FIXME make use of timeout later */
2292 int
2293 liblustre_check_services (void *arg)
2294 {
2295         int  did_something = 0;
2296         int  rc;
2297         cfs_list_t *tmp, *nxt;
2298         ENTRY;
2299
2300         /* I'm relying on being single threaded, so I don't have to lock
2301          * ptlrpc_all_services etc */
2302         cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
2303                 struct ptlrpc_service *svc =
2304                         cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
2305                 struct ptlrpc_service_part *svcpt;
2306
2307                 LASSERT(svc->srv_ncpts == 1);
2308                 svcpt = svc->srv_parts[0];
2309
2310                 if (svcpt->scp_nthrs_running != 0)     /* I've recursed */
2311                         continue;
2312
2313                 /* service threads can block for bulk, so this limits us
2314                  * (arbitrarily) to recursing 1 stack frame per service.
2315                  * Note that the problem with recursion is that we have to
2316                  * unwind completely before our caller can resume. */
2317
2318                 svcpt->scp_nthrs_running++;
2319
2320                 do {
2321                         rc = ptlrpc_server_handle_req_in(svcpt, NULL);
2322                         rc |= ptlrpc_server_handle_reply(svcpt);
2323                         rc |= ptlrpc_at_check_timed(svcpt);
2324                         rc |= ptlrpc_server_handle_request(svcpt, NULL);
2325                         rc |= (ptlrpc_server_post_idle_rqbds(svcpt) > 0);
2326                         did_something |= rc;
2327                 } while (rc);
2328
2329                 svcpt->scp_nthrs_running--;
2330         }
2331
2332         RETURN(did_something);
2333 }
2334 #define ptlrpc_stop_all_threads(s) do {} while (0)
2335
2336 #else /* __KERNEL__ */
2337
2338 static void
2339 ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
2340 {
2341         int avail = svcpt->scp_nrqbds_posted;
2342         int low_water = test_req_buffer_pressure ? 0 :
2343                         svcpt->scp_service->srv_nbuf_per_group / 2;
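        /* e.g. assuming srv_nbuf_per_group were 64 for this service, more
         * buffers are grown once 32 or fewer rqbds remain posted; with
         * test_req_buffer_pressure set, low_water is 0 and growth happens
         * only when no buffers remain posted at all */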
2344
2345         /* NB I'm not locking; just looking. */
2346
2347         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
2348          * allowed the request history to grow out of control.  We could put a
2349          * sanity check on that here and cull some history if we need the
2350          * space. */
2351
2352         if (avail <= low_water)
2353                 ptlrpc_grow_req_bufs(svcpt, 1);
2354
2355         if (svcpt->scp_service->srv_stats) {
2356                 lprocfs_counter_add(svcpt->scp_service->srv_stats,
2357                                     PTLRPC_REQBUF_AVAIL_CNTR, avail);
2358         }
2359 }
2360
2361 static int
2362 ptlrpc_retry_rqbds(void *arg)
2363 {
2364         struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
2365
2366         svcpt->scp_rqbd_timeout = 0;
2367         return -ETIMEDOUT;
2368 }
2369
2370 static inline int
2371 ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
2372 {
2373         return svcpt->scp_nreqs_active <
2374                svcpt->scp_nthrs_running - 1 -
2375                (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
2376 }
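/* The - 1 (and the extra - 1 when a high-priority handler is registered)
 * reserves headroom so that a partition never commits every running thread
 * to normal request handling; see also ptlrpc_server_allow_normal(). */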
2377
2378 /**
2379  * whether we are allowed to create more threads
2380  * callers may invoke this without any lock, but must hold
2381  * ptlrpc_service_part::scp_lock to get a reliable result
2382  */
2383 static inline int
2384 ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
2385 {
2386         return svcpt->scp_nthrs_running +
2387                svcpt->scp_nthrs_starting <
2388                svcpt->scp_service->srv_nthrs_cpt_limit;
2389 }
2390
2391 /**
2392  * too many requests and allowed to create more threads
2393  */
2394 static inline int
2395 ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
2396 {
2397         return !ptlrpc_threads_enough(svcpt) &&
2398                 ptlrpc_threads_increasable(svcpt);
2399 }
2400
2401 static inline int
2402 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2403 {
2404         return thread_is_stopping(thread) ||
2405                thread->t_svcpt->scp_service->srv_is_stopping;
2406 }
2407
2408 static inline int
2409 ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
2410 {
2411         return !cfs_list_empty(&svcpt->scp_rqbd_idle) &&
2412                svcpt->scp_rqbd_timeout == 0;
2413 }
2414
2415 static inline int
2416 ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
2417 {
2418         return svcpt->scp_at_check;
2419 }
2420
2421 /**
2422  * whether requests are waiting for preprocessing
2423  * callers may invoke this without any lock, but must hold
2424  * ptlrpc_service_part::scp_lock to get a reliable result
2425  */
2426 static inline int
2427 ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
2428 {
2429         return !cfs_list_empty(&svcpt->scp_req_incoming);
2430 }
2431
2432 static __attribute__((__noinline__)) int
2433 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
2434                   struct ptlrpc_thread *thread)
2435 {
2436         /* Don't exit while there are replies to be handled */
2437         struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
2438                                              ptlrpc_retry_rqbds, svcpt);
2439
2440         lc_watchdog_disable(thread->t_watchdog);
2441
2442         cond_resched();
2443
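        /* exclusive wait: a plain wake_up() on scp_waitq rouses only one
         * of the threads parked here, so a single incoming event does not
         * stampede the whole thread pool (stopping uses wake_up_all()) */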
2444         l_wait_event_exclusive_head(svcpt->scp_waitq,
2445                                 ptlrpc_thread_stopping(thread) ||
2446                                 ptlrpc_server_request_incoming(svcpt) ||
2447                                 ptlrpc_server_request_pending(svcpt, false) ||
2448                                 ptlrpc_rqbd_pending(svcpt) ||
2449                                 ptlrpc_at_check(svcpt), &lwi);
2450
2451         if (ptlrpc_thread_stopping(thread))
2452                 return -EINTR;
2453
2454         lc_watchdog_touch(thread->t_watchdog,
2455                           ptlrpc_server_get_timeout(svcpt));
2456         return 0;
2457 }
2458
2459 /**
2460  * Main thread body for service threads.
2461  * Waits in a loop for new requests to process to appear.
2462  * Every time an incoming request is added to its queue, a waitq
2463  * is woken up and one of the threads will handle it.
2464  */
2465 static int ptlrpc_main(void *arg)
2466 {
2467         struct ptlrpc_thread            *thread = (struct ptlrpc_thread *)arg;
2468         struct ptlrpc_service_part      *svcpt = thread->t_svcpt;
2469         struct ptlrpc_service           *svc = svcpt->scp_service;
2470         struct ptlrpc_reply_state       *rs;
2471 #ifdef WITH_GROUP_INFO
2472         struct group_info *ginfo = NULL;
2473 #endif
2474         struct lu_env *env;
2475         int counter = 0, rc = 0;
2476         ENTRY;
2477
2478         thread->t_pid = current_pid();
2479         unshare_fs_struct();
2480
2481         /* NB: we will call cfs_cpt_bind() for all threads, because we
2482          * might want to run lustre server only on a subset of system CPUs,
2483          * in that case ->scp_cpt is CFS_CPT_ANY */
2484         rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
2485         if (rc != 0) {
2486                 CWARN("%s: failed to bind %s on CPT %d\n",
2487                       svc->srv_name, thread->t_name, svcpt->scp_cpt);
2488         }
2489
2490 #ifdef WITH_GROUP_INFO
2491         ginfo = groups_alloc(0);
2492         if (!ginfo) {
2493                 rc = -ENOMEM;
2494                 goto out;
2495         }
2496
2497         set_current_groups(ginfo);
2498         put_group_info(ginfo);
2499 #endif
2500
2501         if (svc->srv_ops.so_thr_init != NULL) {
2502                 rc = svc->srv_ops.so_thr_init(thread);
2503                 if (rc)
2504                         goto out;
2505         }
2506
2507         OBD_ALLOC_PTR(env);
2508         if (env == NULL) {
2509                 rc = -ENOMEM;
2510                 goto out_srv_fini;
2511         }
2512
2513         rc = lu_context_init(&env->le_ctx,
2514                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2515         if (rc)
2516                 goto out_srv_fini;
2517
2518         thread->t_env = env;
2519         env->le_ctx.lc_thread = thread;
2520         env->le_ctx.lc_cookie = 0x6;
2521
2522         while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
2523                 rc = ptlrpc_server_post_idle_rqbds(svcpt);
2524                 if (rc >= 0)
2525                         continue;
2526
2527                 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2528                         svc->srv_name, svcpt->scp_cpt, rc);
2529                 goto out_srv_fini;
2530         }
2531
2532         /* Alloc reply state structure for this one */
2533         OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
2534         if (!rs) {
2535                 rc = -ENOMEM;
2536                 goto out_srv_fini;
2537         }
2538
2539         spin_lock(&svcpt->scp_lock);
2540
2541         LASSERT(thread_is_starting(thread));
2542         thread_clear_flags(thread, SVC_STARTING);
2543
2544         LASSERT(svcpt->scp_nthrs_starting == 1);
2545         svcpt->scp_nthrs_starting--;
2546
2547         /* SVC_STOPPING may already be set here if someone else is trying
2548          * to stop the service while this new thread has been dynamically
2549          * forked.  We still set SVC_RUNNING to let our creator know that
2550          * we are now running; however, we will exit as soon as possible */
2551         thread_add_flags(thread, SVC_RUNNING);
2552         svcpt->scp_nthrs_running++;
2553         spin_unlock(&svcpt->scp_lock);
2554
2555         /* wake up our creator in case he's still waiting. */
2556         wake_up(&thread->t_ctl_waitq);
2557
2558         thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2559                                              NULL, NULL);
2560
2561         spin_lock(&svcpt->scp_rep_lock);
2562         cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
2563         wake_up(&svcpt->scp_rep_waitq);
2564         spin_unlock(&svcpt->scp_rep_lock);
2565
2566         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2567                svcpt->scp_nthrs_running);
2568
2569         /* XXX maintain a list of all managed devices: insert here */
2570         while (!ptlrpc_thread_stopping(thread)) {
2571                 if (ptlrpc_wait_event(svcpt, thread))
2572                         break;
2573
2574                 ptlrpc_check_rqbd_pool(svcpt);
2575
2576                 if (ptlrpc_threads_need_create(svcpt)) {
2577                         /* Ignore return code - we tried... */
2578                         ptlrpc_start_thread(svcpt, 0);
2579                 }
2580
2581                 /* reset le_ses to initial state */
2582                 env->le_ses = NULL;
2583                 /* Process all incoming reqs before handling any */
2584                 if (ptlrpc_server_request_incoming(svcpt)) {
2585                         lu_context_enter(&env->le_ctx);
2586                         ptlrpc_server_handle_req_in(svcpt, thread);
2587                         lu_context_exit(&env->le_ctx);
2588
2589                         /* but limit ourselves in case of flood */
2590                         if (counter++ < 100)
2591                                 continue;
2592                         counter = 0;
2593                 }
2594
2595                 if (ptlrpc_at_check(svcpt))
2596                         ptlrpc_at_check_timed(svcpt);
2597
2598                 if (ptlrpc_server_request_pending(svcpt, false)) {
2599                         lu_context_enter(&env->le_ctx);
2600                         ptlrpc_server_handle_request(svcpt, thread);
2601                         lu_context_exit(&env->le_ctx);
2602                 }
2603
2604                 if (ptlrpc_rqbd_pending(svcpt) &&
2605                     ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
2606                         /* I just failed to repost request buffers.
2607                          * Wait for a timeout (unless something else
2608                          * happens) before I try again */
2609                         svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
2610                         CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
2611                                svcpt->scp_nrqbds_posted);
2612                 }
2613         }
2614
2615         lc_watchdog_delete(thread->t_watchdog);
2616         thread->t_watchdog = NULL;
2617
2618 out_srv_fini:
2619         /*
2620          * deconstruct service specific state created by ptlrpc_start_thread()
2621          */
2622         if (svc->srv_ops.so_thr_done != NULL)
2623                 svc->srv_ops.so_thr_done(thread);
2624
2625         if (env != NULL) {
2626                 lu_context_fini(&env->le_ctx);
2627                 OBD_FREE_PTR(env);
2628         }
2629 out:
2630         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2631                thread, thread->t_pid, thread->t_id, rc);
2632
2633         spin_lock(&svcpt->scp_lock);
2634         if (thread_test_and_clear_flags(thread, SVC_STARTING))
2635                 svcpt->scp_nthrs_starting--;
2636
2637         if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
2638                 /* must know immediately */
2639                 svcpt->scp_nthrs_running--;
2640         }
2641
2642         thread->t_id = rc;
2643         thread_add_flags(thread, SVC_STOPPED);
2644
2645         wake_up(&thread->t_ctl_waitq);
2646         spin_unlock(&svcpt->scp_lock);
2647
2648         return rc;
2649 }
2650
2651 static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
2652                           cfs_list_t *replies)
2653 {
2654         int result;
2655
2656         spin_lock(&hrt->hrt_lock);
2657
2658         cfs_list_splice_init(&hrt->hrt_queue, replies);
2659         result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
2660
2661         spin_unlock(&hrt->hrt_lock);
2662         return result;
2663 }
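/* hrt_dont_sleep() doubles as the wait condition and the dequeue step: it
 * splices the whole hrt_queue onto the caller's private list while holding
 * hrt_lock, so ptlrpc_hr_main() can then handle those replies without the
 * lock. */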
2664
2665 /**
2666  * Main body of "handle reply" function.
2667  * It processes acked reply states
2668  */
2669 static int ptlrpc_hr_main(void *arg)
2670 {
2671         struct ptlrpc_hr_thread         *hrt = (struct ptlrpc_hr_thread *)arg;
2672         struct ptlrpc_hr_partition      *hrp = hrt->hrt_partition;
2673         CFS_LIST_HEAD                   (replies);
2674         char                            threadname[20];
2675         int                             rc;
2676
2677         snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
2678                  hrp->hrp_cpt, hrt->hrt_id);
2679         unshare_fs_struct();
2680
2681         rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
2682         if (rc != 0) {
2683                 CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
2684                       threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
2685         }
2686
2687         cfs_atomic_inc(&hrp->hrp_nstarted);
2688         wake_up(&ptlrpc_hr.hr_waitq);
2689
2690         while (!ptlrpc_hr.hr_stopping) {
2691                 l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
2692
2693                 while (!cfs_list_empty(&replies)) {
2694                         struct ptlrpc_reply_state *rs;
2695
2696                         rs = cfs_list_entry(replies.prev,
2697                                             struct ptlrpc_reply_state,
2698                                             rs_list);
2699                         cfs_list_del_init(&rs->rs_list);
2700                         ptlrpc_handle_rs(rs);
2701                 }
2702         }
2703
2704         cfs_atomic_inc(&hrp->hrp_nstopped);
2705         wake_up(&ptlrpc_hr.hr_waitq);
2706
2707         return 0;
2708 }
2709
2710 static void ptlrpc_stop_hr_threads(void)
2711 {
2712         struct ptlrpc_hr_partition      *hrp;
2713         int                             i;
2714         int                             j;
2715
2716         ptlrpc_hr.hr_stopping = 1;
2717
2718         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2719                 if (hrp->hrp_thrs == NULL)
2720                         continue; /* uninitialized */
2721                 for (j = 0; j < hrp->hrp_nthrs; j++)
2722                         wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
2723         }
2724
2725         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2726                 if (hrp->hrp_thrs == NULL)
2727                         continue; /* uninitialized */
2728                 wait_event(ptlrpc_hr.hr_waitq,
2729                                cfs_atomic_read(&hrp->hrp_nstopped) ==
2730                                cfs_atomic_read(&hrp->hrp_nstarted));
2731         }
2732 }
2733
2734 static int ptlrpc_start_hr_threads(void)
2735 {
2736         struct ptlrpc_hr_partition      *hrp;
2737         int                             i;
2738         int                             j;
2739         ENTRY;
2740
2741         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2742                 int     rc = 0;
2743
2744                 for (j = 0; j < hrp->hrp_nthrs; j++) {
2745                         struct  ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
2746                         rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
2747                                                  &hrp->hrp_thrs[j],
2748                                                  "ptlrpc_hr%02d_%03d",
2749                                                  hrp->hrp_cpt,
2750                                                  hrt->hrt_id));
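                        /* on success kthread_run() returns a valid
                         * task_struct pointer, so PTR_ERR() yields a
                         * non-error value and IS_ERR_VALUE(rc) stays
                         * false; only a genuine ERR_PTR() breaks out */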
2751                         if (IS_ERR_VALUE(rc))
2752                                 break;
2753                 }
2754                 wait_event(ptlrpc_hr.hr_waitq,
2755                                cfs_atomic_read(&hrp->hrp_nstarted) == j);
2756                 if (!IS_ERR_VALUE(rc))
2757                         continue;
2758
2759                 CERROR("Reply handling thread %d:%d failed to start: "
2760                        "rc = %d\n", i, j, rc);
2761                 ptlrpc_stop_hr_threads();
2762                 RETURN(rc);
2763         }
2764         RETURN(0);
2765 }
2766
2767 static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
2768 {
2769         struct l_wait_info      lwi = { 0 };
2770         struct ptlrpc_thread    *thread;
2771         CFS_LIST_HEAD           (zombie);
2772
2773         ENTRY;
2774
2775         CDEBUG(D_INFO, "Stopping threads for service %s\n",
2776                svcpt->scp_service->srv_name);
2777
2778         spin_lock(&svcpt->scp_lock);
2779         /* let the threads know that we would like them to stop asap */
2780         list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
2781                 CDEBUG(D_INFO, "Stopping thread %s #%u\n",
2782                        svcpt->scp_service->srv_thread_name, thread->t_id);
2783                 thread_add_flags(thread, SVC_STOPPING);
2784         }
2785
2786         wake_up_all(&svcpt->scp_waitq);
2787
2788         while (!cfs_list_empty(&svcpt->scp_threads)) {
2789                 thread = cfs_list_entry(svcpt->scp_threads.next,
2790                                         struct ptlrpc_thread, t_link);
2791                 if (thread_is_stopped(thread)) {
2792                         cfs_list_del(&thread->t_link);
2793                         cfs_list_add(&thread->t_link, &zombie);
2794                         continue;
2795                 }
2796                 spin_unlock(&svcpt->scp_lock);
2797
2798                 CDEBUG(D_INFO, "waiting for thread %s #%u to stop\n",
2799                        svcpt->scp_service->srv_thread_name, thread->t_id);
2800                 l_wait_event(thread->t_ctl_waitq,
2801                              thread_is_stopped(thread), &lwi);
2802
2803                 spin_lock(&svcpt->scp_lock);
2804         }
2805
2806         spin_unlock(&svcpt->scp_lock);
2807
2808         while (!cfs_list_empty(&zombie)) {
2809                 thread = cfs_list_entry(zombie.next,
2810                                         struct ptlrpc_thread, t_link);
2811                 cfs_list_del(&thread->t_link);
2812                 OBD_FREE_PTR(thread);
2813         }
2814         EXIT;
2815 }
2816
2817 /**
2818  * Stops all threads of a particular service \a svc
2819  */
2820 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2821 {
2822         struct ptlrpc_service_part *svcpt;
2823         int                        i;
2824         ENTRY;
2825
2826         ptlrpc_service_for_each_part(svcpt, i, svc) {
2827                 if (svcpt->scp_service != NULL)
2828                         ptlrpc_svcpt_stop_threads(svcpt);
2829         }
2830
2831         EXIT;
2832 }
2833 EXPORT_SYMBOL(ptlrpc_stop_all_threads);
2834
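/**
 * Start the initial threads (srv_nthrs_cpt_init per partition) of
 * service \a svc.  -EMFILE from ptlrpc_start_thread() means the
 * partition already has enough threads and is not treated as an error.
 */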
2835 int ptlrpc_start_threads(struct ptlrpc_service *svc)
2836 {
2837         int     rc = 0;
2838         int     i;
2839         int     j;
2840         ENTRY;
2841
2842         /* We require at least 2 threads; see note in ptlrpc_server_handle_request */
2843         LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
2844
2845         for (i = 0; i < svc->srv_ncpts; i++) {
2846                 for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
2847                         rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
2848                         if (rc == 0)
2849                                 continue;
2850
2851                         if (rc != -EMFILE)
2852                                 goto failed;
2853                         /* We have enough threads, don't start more. b=15759 */
2854                         break;
2855                 }
2856         }
2857
2858         RETURN(0);
2859  failed:
2860         CERROR("cannot start %s thread #%d_%d: rc %d\n",
2861                svc->srv_thread_name, i, j, rc);
2862         ptlrpc_stop_all_threads(svc);
2863         RETURN(rc);
2864 }
2865 EXPORT_SYMBOL(ptlrpc_start_threads);
2866
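/**
 * Start one new thread on service partition \a svcpt.  When \a wait is
 * non-zero, block until the thread is actually running (or has stopped
 * again); otherwise return as soon as the kernel thread is forked.
 */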
2867 int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
2868 {
2869         struct l_wait_info      lwi = { 0 };
2870         struct ptlrpc_thread    *thread;
2871         struct ptlrpc_service   *svc;
2872         int                     rc;
2873         ENTRY;
2874
2875         LASSERT(svcpt != NULL);
2876
2877         svc = svcpt->scp_service;
2878
2879         CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
2880                svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
2881                svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
2882
2883  again:
2884         if (unlikely(svc->srv_is_stopping))
2885                 RETURN(-ESRCH);
2886
2887         if (!ptlrpc_threads_increasable(svcpt) ||
2888             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2889              svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
2890                 RETURN(-EMFILE);
2891
2892         OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
2893         if (thread == NULL)
2894                 RETURN(-ENOMEM);
2895         init_waitqueue_head(&thread->t_ctl_waitq);
2896
2897         spin_lock(&svcpt->scp_lock);
2898         if (!ptlrpc_threads_increasable(svcpt)) {
2899                 spin_unlock(&svcpt->scp_lock);
2900                 OBD_FREE_PTR(thread);
2901                 RETURN(-EMFILE);
2902         }
2903
2904         if (svcpt->scp_nthrs_starting != 0) {
2905                 /* serialize thread startup because some modules (e.g.
2906                  * obdfilter) require unique and contiguous t_id values */
2907                 LASSERT(svcpt->scp_nthrs_starting == 1);
2908                 spin_unlock(&svcpt->scp_lock);
2909                 OBD_FREE_PTR(thread);
2910                 if (wait) {
2911                         CDEBUG(D_INFO, "Waiting for thread %s #%d to be created\n",
2912                                svc->srv_thread_name, svcpt->scp_thr_nextid);
2913                         schedule();
2914                         goto again;
2915                 }
2916
2917                 CDEBUG(D_INFO, "Lost race creating thread %s #%d, retry later\n",
2918                        svc->srv_thread_name, svcpt->scp_thr_nextid);
2919                 RETURN(-EAGAIN);
2920         }
2921
2922         svcpt->scp_nthrs_starting++;
2923         thread->t_id = svcpt->scp_thr_nextid++;
2924         thread_add_flags(thread, SVC_STARTING);
2925         thread->t_svcpt = svcpt;
2926
2927         cfs_list_add(&thread->t_link, &svcpt->scp_threads);
2928         spin_unlock(&svcpt->scp_lock);
2929
2930         if (svcpt->scp_cpt >= 0) {
2931                 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
2932                          svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
2933         } else {
2934                 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
2935                          svc->srv_thread_name, thread->t_id);
2936         }
2937
2938         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
2939         rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
2940         if (IS_ERR_VALUE(rc)) {
2941                 CERROR("cannot start thread '%s': rc %d\n",
2942                        thread->t_name, rc);
2943                 spin_lock(&svcpt->scp_lock);
2944                 --svcpt->scp_nthrs_starting;
2945                 if (thread_is_stopping(thread)) {
2946                         /* this ptlrpc_thread is being handled
2947                          * by ptlrpc_svcpt_stop_threads() now
2948                          */
2949                         thread_add_flags(thread, SVC_STOPPED);
2950                         wake_up(&thread->t_ctl_waitq);
2951                         spin_unlock(&svcpt->scp_lock);
2952                 } else {
2953                         cfs_list_del(&thread->t_link);
2954                         spin_unlock(&svcpt->scp_lock);
2955                         OBD_FREE_PTR(thread);
2956                 }
2957                 RETURN(rc);
2958         }
2959
2960         if (!wait)
2961                 RETURN(0);
2962
2963         l_wait_event(thread->t_ctl_waitq,
2964                      thread_is_running(thread) || thread_is_stopped(thread),
2965                      &lwi);
2966
2967         rc = thread_is_stopped(thread) ? thread->t_id : 0;
2968         RETURN(rc);
2969 }
2970
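/**
 * Allocate the per-CPU-partition reply handling state (one set of
 * threads per partition, scaled down by the number of hyper-thread
 * siblings) and start the reply handling threads.
 */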
2971 int ptlrpc_hr_init(void)
2972 {
2973         struct ptlrpc_hr_partition      *hrp;
2974         struct ptlrpc_hr_thread         *hrt;
2975         int                             rc;
2976         int                             i;
2977         int                             j;
2978         ENTRY;
2979
2980         memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
2981         ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
2982
2983         ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
2984                                                    sizeof(*hrp));
2985         if (ptlrpc_hr.hr_partitions == NULL)
2986                 RETURN(-ENOMEM);
2987
2988         init_waitqueue_head(&ptlrpc_hr.hr_waitq);
2989
2990         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2991                 hrp->hrp_cpt = i;
2992
2993                 cfs_atomic_set(&hrp->hrp_nstarted, 0);
2994                 cfs_atomic_set(&hrp->hrp_nstopped, 0);
2995
2996                 hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
2997                 hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0);
2998
2999                 LASSERT(hrp->hrp_nthrs > 0);
3000                 OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
3001                               hrp->hrp_nthrs * sizeof(*hrt));
3002                 if (hrp->hrp_thrs == NULL)
3003                         GOTO(out, rc = -ENOMEM);
3004
3005                 for (j = 0; j < hrp->hrp_nthrs; j++) {
3006                         hrt = &hrp->hrp_thrs[j];
3007
3008                         hrt->hrt_id = j;
3009                         hrt->hrt_partition = hrp;
3010                         init_waitqueue_head(&hrt->hrt_waitq);
3011                         spin_lock_init(&hrt->hrt_lock);
3012                         CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
3013                 }
3014         }
3015
3016         rc = ptlrpc_start_hr_threads();
3017 out:
3018         if (rc != 0)
3019                 ptlrpc_hr_fini();
3020         RETURN(rc);
3021 }
3022
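/**
 * Stop the reply handling threads and free the per-partition state
 * allocated by ptlrpc_hr_init(); it is safe to call this even if
 * initialization never completed.
 */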
3023 void ptlrpc_hr_fini(void)
3024 {
3025         struct ptlrpc_hr_partition      *hrp;
3026         int                             i;
3027
3028         if (ptlrpc_hr.hr_partitions == NULL)
3029                 return;
3030
3031         ptlrpc_stop_hr_threads();
3032
3033         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
3034                 if (hrp->hrp_thrs != NULL) {
3035                         OBD_FREE(hrp->hrp_thrs,
3036                                  hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
3037                 }
3038         }
3039
3040         cfs_percpt_free(ptlrpc_hr.hr_partitions);
3041         ptlrpc_hr.hr_partitions = NULL;
3042 }
3043
3044 #endif /* __KERNEL__ */
3045
3046 /**
3047  * Wait until all already scheduled replies are processed.
3048  */
3049 static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
3050 {
3051         while (1) {
3052                 int rc;
3053                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
3054                                                      NULL, NULL);
3055
3056                 rc = l_wait_event(svcpt->scp_waitq,
3057                      cfs_atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
3058                 if (rc == 0)
3059                         break;
3060                 CWARN("Still waiting for difficult replies: %s %p\n",
3061                       svcpt->scp_service->srv_name, svcpt->scp_service);
3062         }
3063 }
3064
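/**
 * Disarm the adaptive timeout (AT) timer on every partition of \a svc.
 */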
3065 static void
3066 ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
3067 {
3068         struct ptlrpc_service_part      *svcpt;
3069         int                             i;
3070
3071         /* disarm the AT timers early... */
3072         ptlrpc_service_for_each_part(svcpt, i, svc) {
3073                 if (svcpt->scp_service != NULL)
3074                         cfs_timer_disarm(&svcpt->scp_at_timer);
3075         }
3076 }
3077
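/**
 * Unlink all posted request buffers of \a svc from LNet, then wait for
 * the network to release any buffers it is still filling.
 */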
3078 static void
3079 ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
3080 {
3081         struct ptlrpc_service_part        *svcpt;
3082         struct ptlrpc_request_buffer_desc *rqbd;
3083         struct l_wait_info                lwi;
3084         int                               rc;
3085         int                               i;
3086
3087         /* All history will be culled when the next request buffer is
3088          * freed in ptlrpc_service_purge_all() */
3089         svc->srv_hist_nrqbds_cpt_max = 0;
3090
3091         rc = LNetClearLazyPortal(svc->srv_req_portal);
3092         LASSERT(rc == 0);
3093
3094         ptlrpc_service_for_each_part(svcpt, i, svc) {
3095                 if (svcpt->scp_service == NULL)
3096                         break;
3097
3098                 /* Unlink all the request buffers.  This forces a 'final'
3099                  * event with its 'unlink' flag set for each posted rqbd */
3100                 cfs_list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
3101                                         rqbd_list) {
3102                         rc = LNetMDUnlink(rqbd->rqbd_md_h);
3103                         LASSERT(rc == 0 || rc == -ENOENT);
3104                 }
3105         }
3106
3107         ptlrpc_service_for_each_part(svcpt, i, svc) {
3108                 if (svcpt->scp_service == NULL)
3109                         break;
3110
3111                 /* Wait for the network to release any buffers
3112                  * it's currently filling */
3113                 spin_lock(&svcpt->scp_lock);
3114                 while (svcpt->scp_nrqbds_posted != 0) {
3115                         spin_unlock(&svcpt->scp_lock);
3116                         /* Network access will complete in finite time but
3117                          * the HUGE timeout lets us CWARN for visibility
3118                          * of sluggish NALs */
3119                         lwi = LWI_TIMEOUT_INTERVAL(
3120                                         cfs_time_seconds(LONG_UNLINK),
3121                                         cfs_time_seconds(1), NULL, NULL);
3122                         rc = l_wait_event(svcpt->scp_waitq,
3123                                           svcpt->scp_nrqbds_posted == 0, &lwi);
3124                         if (rc == -ETIMEDOUT) {
3125                                 CWARN("Service %s waiting for "
3126                                       "request buffers\n",
3127                                       svcpt->scp_service->srv_name);
3128                         }
3129                         spin_lock(&svcpt->scp_lock);
3130                 }
3131                 spin_unlock(&svcpt->scp_lock);
3132         }
3133 }
3134
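/**
 * Drain everything still queued on \a svc: schedule any remaining
 * difficult replies, drop incoming and pending requests, then free the
 * idle request buffers and reply states once nothing references them.
 */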
3135 static void
3136 ptlrpc_service_purge_all(struct ptlrpc_service *svc)
3137 {
3138         struct ptlrpc_service_part              *svcpt;
3139         struct ptlrpc_request_buffer_desc       *rqbd;
3140         struct ptlrpc_request                   *req;
3141         struct ptlrpc_reply_state               *rs;
3142         int                                     i;
3143
3144         ptlrpc_service_for_each_part(svcpt, i, svc) {
3145                 if (svcpt->scp_service == NULL)
3146                         break;
3147
3148                 spin_lock(&svcpt->scp_rep_lock);
3149                 while (!cfs_list_empty(&svcpt->scp_rep_active)) {
3150                         rs = cfs_list_entry(svcpt->scp_rep_active.next,
3151                                             struct ptlrpc_reply_state, rs_list);
3152                         spin_lock(&rs->rs_lock);
3153                         ptlrpc_schedule_difficult_reply(rs);
3154                         spin_unlock(&rs->rs_lock);
3155                 }
3156                 spin_unlock(&svcpt->scp_rep_lock);
3157
3158                 /* purge the request queue.  NB No new replies (rqbds
3159                  * all unlinked) and no service threads, so I'm the only
3160                  * thread noodling the request queue now */
3161                 while (!cfs_list_empty(&svcpt->scp_req_incoming)) {
3162                         req = cfs_list_entry(svcpt->scp_req_incoming.next,
3163                                              struct ptlrpc_request, rq_list);
3164
3165                         cfs_list_del(&req->rq_list);
3166                         svcpt->scp_nreqs_incoming--;
3167                         ptlrpc_server_finish_request(svcpt, req);
3168                 }
3169
3170                 while (ptlrpc_server_request_pending(svcpt, true)) {
3171                         req = ptlrpc_server_request_get(svcpt, true);
3172                         ptlrpc_server_finish_active_request(svcpt, req);
3173                 }
3174
3175                 LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted));
3176                 LASSERT(svcpt->scp_nreqs_incoming == 0);
3177                 LASSERT(svcpt->scp_nreqs_active == 0);
3178                 /* history should have been culled by
3179                  * ptlrpc_server_finish_request */
3180                 LASSERT(svcpt->scp_hist_nrqbds == 0);
3181
3182                 /* Now free all the request buffers since nothing
3183                  * references them any more... */
3184
3185                 while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
3186                         rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
3187                                               struct ptlrpc_request_buffer_desc,
3188                                               rqbd_list);
3189                         ptlrpc_free_rqbd(rqbd);
3190                 }
3191                 ptlrpc_wait_replies(svcpt);
3192
3193                 while (!cfs_list_empty(&svcpt->scp_rep_idle)) {
3194                         rs = cfs_list_entry(svcpt->scp_rep_idle.next,
3195                                             struct ptlrpc_reply_state,
3196                                             rs_list);
3197                         cfs_list_del(&rs->rs_list);
3198                         OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
3199                 }
3200         }
3201 }
3202
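/**
 * Free the memory of \a svc itself: the AT arrays of each partition,
 * the partitions, the CPT expression list, and finally the service
 * structure.
 */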
3203 static void
3204 ptlrpc_service_free(struct ptlrpc_service *svc)
3205 {
3206         struct ptlrpc_service_part      *svcpt;
3207         struct ptlrpc_at_array          *array;
3208         int                             i;
3209
3210         ptlrpc_service_for_each_part(svcpt, i, svc) {
3211                 if (svcpt->scp_service == NULL)
3212                         break;
3213
3214                 /* In case somebody rearmed this in the meantime */
3215                 cfs_timer_disarm(&svcpt->scp_at_timer);
3216                 array = &svcpt->scp_at_array;
3217
3218                 if (array->paa_reqs_array != NULL) {
3219                         OBD_FREE(array->paa_reqs_array,
3220                                  sizeof(cfs_list_t) * array->paa_size);
3221                         array->paa_reqs_array = NULL;
3222                 }
3223
3224                 if (array->paa_reqs_count != NULL) {
3225                         OBD_FREE(array->paa_reqs_count,
3226                                  sizeof(__u32) * array->paa_size);
3227                         array->paa_reqs_count = NULL;
3228                 }
3229         }
3230
3231         ptlrpc_service_for_each_part(svcpt, i, svc)
3232                 OBD_FREE_PTR(svcpt);
3233
3234         if (svc->srv_cpts != NULL)
3235                 cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
3236
3237         OBD_FREE(svc, offsetof(struct ptlrpc_service,
3238                                srv_parts[svc->srv_ncpts]));
3239 }
3240
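/**
 * Tear down \a service completely: remove it from the global services
 * list, stop its threads, unlink and purge all request buffers, clean
 * up NRS and lprocfs state, then free the service.  Returns 0.
 *
 * Illustrative sketch only (the obd field names below are hypothetical,
 * not part of this file): a typical server-side caller does
 *
 *	ptlrpc_unregister_service(obd->u.example.eo_service);
 *	obd->u.example.eo_service = NULL;
 */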
3241 int ptlrpc_unregister_service(struct ptlrpc_service *service)
3242 {
3243         ENTRY;
3244
3245         CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
3246
3247         service->srv_is_stopping = 1;
3248
3249         mutex_lock(&ptlrpc_all_services_mutex);
3250         cfs_list_del_init(&service->srv_list);
3251         mutex_unlock(&ptlrpc_all_services_mutex);
3252
3253         ptlrpc_service_del_atimer(service);
3254         ptlrpc_stop_all_threads(service);
3255
3256         ptlrpc_service_unlink_rqbd(service);
3257         ptlrpc_service_purge_all(service);
3258         ptlrpc_service_nrs_cleanup(service);
3259
3260         ptlrpc_lprocfs_unregister_service(service);
3261
3262         ptlrpc_service_free(service);
3263
3264         RETURN(0);
3265 }
3266 EXPORT_SYMBOL(ptlrpc_unregister_service);
3267
3268 /**
3269  * Returns 0 if the service is healthy.
3270  *
3271  * Right now, it just checks to make sure that requests aren't languishing
3272  * in the queue.  We'll use this health check to govern whether a node needs
3273  * to be shot, so it's intentionally non-aggressive.
 */
3274 int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
3275 {
3276         struct ptlrpc_request           *request = NULL;
3277         struct timeval                  right_now;
3278         long                            timediff;
3279
3280         do_gettimeofday(&right_now);
3281
3282         spin_lock(&svcpt->scp_req_lock);
3283         /* How long has the next entry been waiting? */
3284         if (ptlrpc_server_high_pending(svcpt, true))
3285                 request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
3286         else if (ptlrpc_server_normal_pending(svcpt, true))
3287                 request = ptlrpc_nrs_req_peek_nolock(svcpt, false);
3288
3289         if (request == NULL) {
3290                 spin_unlock(&svcpt->scp_req_lock);
3291                 return 0;
3292         }
3293
3294         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
3295         spin_unlock(&svcpt->scp_req_lock);
3296
3297         if ((timediff / ONE_MILLION) >
3298             (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
3299                 CERROR("%s: unhealthy - request has been waiting %lds\n",
3300                        svcpt->scp_service->srv_name, timediff / ONE_MILLION);
3301                 return -1;
3302         }
3303
3304         return 0;
3305 }
3306
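/**
 * Check every partition of \a svc with ptlrpc_svcpt_health_check() and
 * return the first non-zero (unhealthy) result, or 0 if all partitions
 * are healthy.
 */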
3307 int
3308 ptlrpc_service_health_check(struct ptlrpc_service *svc)
3309 {
3310         struct ptlrpc_service_part      *svcpt;
3311         int                             i;
3312
3313         if (svc == NULL)
3314                 return 0;
3315
3316         ptlrpc_service_for_each_part(svcpt, i, svc) {
3317                 int rc = ptlrpc_svcpt_health_check(svcpt);
3318
3319                 if (rc != 0)
3320                         return rc;
3321         }
3322         return 0;
3323 }
3324 EXPORT_SYMBOL(ptlrpc_service_health_check);