lustre/ptlrpc/service.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #define DEBUG_SUBSYSTEM S_RPC
38 #ifndef __KERNEL__
39 #include <liblustre.h>
40 #endif
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_net.h>
44 #include <lu_object.h>
45 #include <lnet/types.h>
46 #include "ptlrpc_internal.h"
47
48 /* The following are visible and mutable through /sys/module/ptlrpc */
49 int test_req_buffer_pressure = 0;
50 CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
51                 "set non-zero to put pressure on request buffer pools");
52 CFS_MODULE_PARM(at_min, "i", int, 0644,
53                 "Adaptive timeout minimum (sec)");
54 CFS_MODULE_PARM(at_max, "i", int, 0644,
55                 "Adaptive timeout maximum (sec)");
56 CFS_MODULE_PARM(at_history, "i", int, 0644,
57                 "Adaptive timeouts remember the slowest event that took place "
58                 "within this period (sec)");
59 CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
60                 "How soon before an RPC deadline to send an early reply");
61 CFS_MODULE_PARM(at_extra, "i", int, 0644,
62                 "How much extra time to give with each early reply");
63
64
65 /* forward refs */
66 static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
67 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
68 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
69
70 /** Holds a list of all PTLRPC services */
71 CFS_LIST_HEAD(ptlrpc_all_services);
72 /** Used to protect the \e ptlrpc_all_services list */
73 struct mutex ptlrpc_all_services_mutex;
74
75 struct ptlrpc_request_buffer_desc *
76 ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
77 {
78         struct ptlrpc_service             *svc = svcpt->scp_service;
79         struct ptlrpc_request_buffer_desc *rqbd;
80
81         OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt);
82         if (rqbd == NULL)
83                 return NULL;
84
85         rqbd->rqbd_svcpt = svcpt;
86         rqbd->rqbd_refcount = 0;
87         rqbd->rqbd_cbid.cbid_fn = request_in_callback;
88         rqbd->rqbd_cbid.cbid_arg = rqbd;
89         CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
90         OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
91                             svcpt->scp_cpt, svc->srv_buf_size);
92         if (rqbd->rqbd_buffer == NULL) {
93                 OBD_FREE_PTR(rqbd);
94                 return NULL;
95         }
96
97         spin_lock(&svcpt->scp_lock);
98         cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
99         svcpt->scp_nrqbds_total++;
100         spin_unlock(&svcpt->scp_lock);
101
102         return rqbd;
103 }
104
105 void
106 ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
107 {
108         struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
109
110         LASSERT(rqbd->rqbd_refcount == 0);
111         LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
112
113         spin_lock(&svcpt->scp_lock);
114         cfs_list_del(&rqbd->rqbd_list);
115         svcpt->scp_nrqbds_total--;
116         spin_unlock(&svcpt->scp_lock);
117
118         OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
119         OBD_FREE_PTR(rqbd);
120 }
121
122 int
123 ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
124 {
125         struct ptlrpc_service             *svc = svcpt->scp_service;
126         struct ptlrpc_request_buffer_desc *rqbd;
127         int                                rc = 0;
128         int                                i;
129
130         if (svcpt->scp_rqbd_allocating)
131                 goto try_post;
132
133         spin_lock(&svcpt->scp_lock);
134         /* check again with lock */
135         if (svcpt->scp_rqbd_allocating) {
136                 /* NB: we might allow more than one thread in the future */
137                 LASSERT(svcpt->scp_rqbd_allocating == 1);
138                 spin_unlock(&svcpt->scp_lock);
139                 goto try_post;
140         }
141
142         svcpt->scp_rqbd_allocating++;
143         spin_unlock(&svcpt->scp_lock);
144
145
146         for (i = 0; i < svc->srv_nbuf_per_group; i++) {
147                 /* NB: another thread might have recycled enough rqbds, we
148                  * need to make sure it wouldn't over-allocate, see LU-1212. */
149                 if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
150                         break;
151
152                 rqbd = ptlrpc_alloc_rqbd(svcpt);
153
154                 if (rqbd == NULL) {
155                         CERROR("%s: Can't allocate request buffer\n",
156                                svc->srv_name);
157                         rc = -ENOMEM;
158                         break;
159                 }
160         }
161
162         spin_lock(&svcpt->scp_lock);
163
164         LASSERT(svcpt->scp_rqbd_allocating == 1);
165         svcpt->scp_rqbd_allocating--;
166
167         spin_unlock(&svcpt->scp_lock);
168
169         CDEBUG(D_RPCTRACE,
170                "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
171                svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
172                svcpt->scp_nrqbds_total, rc);
173
174  try_post:
175         if (post && rc == 0)
176                 rc = ptlrpc_server_post_idle_rqbds(svcpt);
177
178         return rc;
179 }
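/*
 * The function above uses a double-checked "scp_rqbd_allocating" flag so
 * that only one thread grows the buffer pool at a time: an unlocked
 * fast-path test, a re-check under scp_lock before claiming the flag, and
 * the heavy allocation work done outside the lock.  A minimal sketch of
 * the same pattern, with hypothetical names (struct pool, do_allocations):
 */
#if 0
static void pool_grow_once(struct pool *p)
{
	if (p->growing)			/* unlocked fast path */
		return;

	spin_lock(&p->lock);
	if (p->growing) {		/* re-check under the lock */
		spin_unlock(&p->lock);
		return;
	}
	p->growing = 1;			/* claim the flag */
	spin_unlock(&p->lock);

	do_allocations(p);		/* heavy work, lock not held */

	spin_lock(&p->lock);
	p->growing = 0;
	spin_unlock(&p->lock);
}
#endif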
180
181 /**
182  * Part of Rep-Ack logic.
183  * Puts a lock and its mode into the reply state associated with the request reply.
184  */
185 void
186 ptlrpc_save_lock(struct ptlrpc_request *req,
187                  struct lustre_handle *lock, int mode, int no_ack)
188 {
189         struct ptlrpc_reply_state *rs = req->rq_reply_state;
190         int                        idx;
191
192         LASSERT(rs != NULL);
193         LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
194
195         if (req->rq_export->exp_disconnected) {
196                 ldlm_lock_decref(lock, mode);
197         } else {
198                 idx = rs->rs_nlocks++;
199                 rs->rs_locks[idx] = *lock;
200                 rs->rs_modes[idx] = mode;
201                 rs->rs_difficult = 1;
202                 rs->rs_no_ack = !!no_ack;
203         }
204 }
205 EXPORT_SYMBOL(ptlrpc_save_lock);
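/*
 * A server-side handler would typically call ptlrpc_save_lock() after
 * taking an LDLM lock on behalf of a difficult reply, so the lock is only
 * released once the client ACKs the reply (or the export disconnects).
 * A hedged usage sketch; the lock handle and how it was acquired are
 * hypothetical:
 */
#if 0
	struct lustre_handle lockh;

	/* ... handler acquires a PW lock and fills in lockh ... */
	ptlrpc_save_lock(req, &lockh, LCK_PW, 0 /* ack expected */);
#endif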
206
207 #ifdef __KERNEL__
208
209 struct ptlrpc_hr_partition;
210
211 struct ptlrpc_hr_thread {
212         int                             hrt_id;         /* thread ID */
213         spinlock_t                      hrt_lock;
214         wait_queue_head_t               hrt_waitq;
215         cfs_list_t                      hrt_queue;      /* RS queue */
216         struct ptlrpc_hr_partition      *hrt_partition;
217 };
218
219 struct ptlrpc_hr_partition {
220         /* # of started threads */
221         cfs_atomic_t                    hrp_nstarted;
222         /* # of stopped threads */
223         cfs_atomic_t                    hrp_nstopped;
224         /* cpu partition id */
225         int                             hrp_cpt;
226         /* round-robin rotor for choosing thread */
227         int                             hrp_rotor;
228         /* total number of threads on this partition */
229         int                             hrp_nthrs;
230         /* threads table */
231         struct ptlrpc_hr_thread         *hrp_thrs;
232 };
233
234 #define HRT_RUNNING 0
235 #define HRT_STOPPING 1
236
237 struct ptlrpc_hr_service {
238         /* CPU partition table, it's just cfs_cpt_table for now */
239         struct cfs_cpt_table            *hr_cpt_table;
240         /** controller sleep waitq */
241         wait_queue_head_t               hr_waitq;
242         unsigned int                    hr_stopping;
243         /** roundrobin rotor for non-affinity service */
244         unsigned int                    hr_rotor;
245         /* partition data */
246         struct ptlrpc_hr_partition      **hr_partitions;
247 };
248
249 struct rs_batch {
250         cfs_list_t                      rsb_replies;
251         unsigned int                    rsb_n_replies;
252         struct ptlrpc_service_part      *rsb_svcpt;
253 };
254
255 /** reply handling service. */
256 static struct ptlrpc_hr_service         ptlrpc_hr;
257
258 /**
259  * maximum number of replies scheduled in one batch
260  */
261 #define MAX_SCHEDULED 256
262
263 /**
264  * Initialize a reply batch.
265  *
266  * \param b batch
267  */
268 static void rs_batch_init(struct rs_batch *b)
269 {
270         memset(b, 0, sizeof *b);
271         CFS_INIT_LIST_HEAD(&b->rsb_replies);
272 }
273
274 /**
275  * Choose an hr thread to dispatch replies to.
276  */
277 static struct ptlrpc_hr_thread *
278 ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
279 {
280         struct ptlrpc_hr_partition      *hrp;
281         unsigned int                    rotor;
282
283         if (svcpt->scp_cpt >= 0 &&
284             svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
285                 /* directly match partition */
286                 hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];
287
288         } else {
289                 rotor = ptlrpc_hr.hr_rotor++;
290                 rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);
291
292                 hrp = ptlrpc_hr.hr_partitions[rotor];
293         }
294
295         rotor = hrp->hrp_rotor++;
296         return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
297 }
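/*
 * The selection above is a two-level round-robin: unless the service
 * partition maps directly onto the reply-handling CPU partition table, a
 * global rotor picks a partition, then a per-partition rotor picks a
 * thread.  The rotors are deliberately updated without locking; a lost
 * increment only skews the distribution slightly and can never produce an
 * out-of-range index.  A standalone sketch of the rotor idiom
 * (hypothetical helper):
 */
#if 0
static inline int rotor_next(unsigned int *rotor, int n)
{
	/* racy post-increment; the modulo keeps the result in [0, n) */
	return (*rotor)++ % n;
}
#endif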
298
299 /**
300  * Dispatch all replies accumulated in the batch to one of the
301  * dedicated reply handling threads.
302  *
303  * \param b batch
304  */
305 static void rs_batch_dispatch(struct rs_batch *b)
306 {
307         if (b->rsb_n_replies != 0) {
308                 struct ptlrpc_hr_thread *hrt;
309
310                 hrt = ptlrpc_hr_select(b->rsb_svcpt);
311
312                 spin_lock(&hrt->hrt_lock);
313                 cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
314                 spin_unlock(&hrt->hrt_lock);
315
316                 wake_up(&hrt->hrt_waitq);
317                 b->rsb_n_replies = 0;
318         }
319 }
320
321 /**
322  * Add a reply to a batch.
323  * Add one reply object to a batch; schedule the batched replies if the batch is full.
324  *
325  * \param b batch
326  * \param rs reply
327  */
328 static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
329 {
330         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
331
332         if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
333                 if (b->rsb_svcpt != NULL) {
334                         rs_batch_dispatch(b);
335                         spin_unlock(&b->rsb_svcpt->scp_rep_lock);
336                 }
337                 spin_lock(&svcpt->scp_rep_lock);
338                 b->rsb_svcpt = svcpt;
339         }
340         spin_lock(&rs->rs_lock);
341         rs->rs_scheduled_ever = 1;
342         if (rs->rs_scheduled == 0) {
343                 cfs_list_move(&rs->rs_list, &b->rsb_replies);
344                 rs->rs_scheduled = 1;
345                 b->rsb_n_replies++;
346         }
347         rs->rs_committed = 1;
348         spin_unlock(&rs->rs_lock);
349 }
350
351 /**
352  * Reply batch finalization.
353  * Dispatch the remaining replies from the batch
354  * and release the remaining spinlock.
355  *
356  * \param b batch
357  */
358 static void rs_batch_fini(struct rs_batch *b)
359 {
360         if (b->rsb_svcpt != NULL) {
361                 rs_batch_dispatch(b);
362                 spin_unlock(&b->rsb_svcpt->scp_rep_lock);
363         }
364 }
365
366 #define DECLARE_RS_BATCH(b)     struct rs_batch b
367
368 #else /* __KERNEL__ */
369
370 #define rs_batch_init(b)        do { } while (0)
371 #define rs_batch_fini(b)        do { } while (0)
372 #define rs_batch_add(b, r)      ptlrpc_schedule_difficult_reply(r)
373 #define DECLARE_RS_BATCH(b)
374
375 #endif /* __KERNEL__ */
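/*
 * Taken together, the batch API follows an init/add.../fini calling
 * convention; ptlrpc_commit_replies() below is the in-tree user.  A
 * condensed sketch of that convention (the traversal of committed reply
 * states is elided):
 */
#if 0
	struct rs_batch batch;

	rs_batch_init(&batch);
	/* for each committed reply state rs: */
	rs_batch_add(&batch, rs);	/* takes/keeps scp_rep_lock per svcpt */
	rs_batch_fini(&batch);		/* dispatches leftovers, drops lock */
#endif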
376
377 /**
378  * Put reply state into a queue for processing because we received
379  * ACK from the client
380  */
381 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
382 {
383 #ifdef __KERNEL__
384         struct ptlrpc_hr_thread *hrt;
385         ENTRY;
386
387         LASSERT(cfs_list_empty(&rs->rs_list));
388
389         hrt = ptlrpc_hr_select(rs->rs_svcpt);
390
391         spin_lock(&hrt->hrt_lock);
392         cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
393         spin_unlock(&hrt->hrt_lock);
394
395         wake_up(&hrt->hrt_waitq);
396         EXIT;
397 #else
398         cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
399 #endif
400 }
401
402 void
403 ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
404 {
405         ENTRY;
406
407         LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
408         LASSERT(spin_is_locked(&rs->rs_lock));
409         LASSERT (rs->rs_difficult);
410         rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
411
412         if (rs->rs_scheduled) {     /* being set up or already notified */
413                 EXIT;
414                 return;
415         }
416
417         rs->rs_scheduled = 1;
418         cfs_list_del_init(&rs->rs_list);
419         ptlrpc_dispatch_difficult_reply(rs);
420         EXIT;
421 }
422 EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
423
424 void ptlrpc_commit_replies(struct obd_export *exp)
425 {
426         struct ptlrpc_reply_state *rs, *nxt;
427         DECLARE_RS_BATCH(batch);
428         ENTRY;
429
430         rs_batch_init(&batch);
431         /* Find any replies that have been committed and hand them to
432          * their service to be completed. */
433
434         /* CAVEAT EMPTOR: spinlock ordering!!! */
435         spin_lock(&exp->exp_uncommitted_replies_lock);
436         cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
437                                      rs_obd_list) {
438                 LASSERT (rs->rs_difficult);
439                 /* VBR: per-export last_committed */
440                 LASSERT(rs->rs_export);
441                 if (rs->rs_transno <= exp->exp_last_committed) {
442                         cfs_list_del_init(&rs->rs_obd_list);
443                         rs_batch_add(&batch, rs);
444                 }
445         }
446         spin_unlock(&exp->exp_uncommitted_replies_lock);
447         rs_batch_fini(&batch);
448         EXIT;
449 }
450 EXPORT_SYMBOL(ptlrpc_commit_replies);
451
452 static int
453 ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
454 {
455         struct ptlrpc_request_buffer_desc *rqbd;
456         int                               rc;
457         int                               posted = 0;
458
459         for (;;) {
460                 spin_lock(&svcpt->scp_lock);
461
462                 if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
463                         spin_unlock(&svcpt->scp_lock);
464                         return posted;
465                 }
466
467                 rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
468                                       struct ptlrpc_request_buffer_desc,
469                                       rqbd_list);
470                 cfs_list_del(&rqbd->rqbd_list);
471
472                 /* assume we will post successfully */
473                 svcpt->scp_nrqbds_posted++;
474                 cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
475
476                 spin_unlock(&svcpt->scp_lock);
477
478                 rc = ptlrpc_register_rqbd(rqbd);
479                 if (rc != 0)
480                         break;
481
482                 posted = 1;
483         }
484
485         spin_lock(&svcpt->scp_lock);
486
487         svcpt->scp_nrqbds_posted--;
488         cfs_list_del(&rqbd->rqbd_list);
489         cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
490
491         /* Don't complain if no request buffers are posted right now; LNET
492          * won't drop requests because we set the portal lazy! */
493
494         spin_unlock(&svcpt->scp_lock);
495
496         return -1;
497 }
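/*
 * Note the optimistic-update idiom above: the rqbd is moved to the posted
 * list and scp_nrqbds_posted is incremented before ptlrpc_register_rqbd()
 * is even attempted, and both changes are rolled back only on failure.
 * This keeps the lock hold time short at the cost of a small rollback
 * path.  In sketch form, with hypothetical names:
 */
#if 0
	spin_lock(&lock);
	nposted++;				/* assume success */
	cfs_list_move(&item->list, &posted);
	spin_unlock(&lock);

	if (try_post(item) != 0) {		/* failed: undo under lock */
		spin_lock(&lock);
		nposted--;
		cfs_list_move_tail(&item->list, &idle);
		spin_unlock(&lock);
	}
#endif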
498
499 static void ptlrpc_at_timer(unsigned long castmeharder)
500 {
501         struct ptlrpc_service_part *svcpt;
502
503         svcpt = (struct ptlrpc_service_part *)castmeharder;
504
505         svcpt->scp_at_check = 1;
506         svcpt->scp_at_checktime = cfs_time_current();
507         wake_up(&svcpt->scp_waitq);
508 }
509
510 static void
511 ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
512                              struct ptlrpc_service_conf *conf)
513 {
514 #ifdef __KERNEL__
515         struct ptlrpc_service_thr_conf  *tc = &conf->psc_thr;
516         unsigned                        init;
517         unsigned                        total;
518         unsigned                        nthrs;
519         int                             weight;
520
521         /*
522          * Common code for estimating and validating the number of threads.
523          * A CPT-affinity service may have a per-CPT thread pool instead
524          * of a global thread pool, which means users might not always
525          * get the number of threads they requested in conf::tc_nthrs_user
526          * even if they did set it. This is because we need to validate the
527          * thread count for each CPT to guarantee that each pool has enough
528          * threads to keep the service healthy.
529          */
530         init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
531         init = max_t(int, init, tc->tc_nthrs_init);
532
533         /* NB: please see comments in lustre_lnet.h for definition
534          * details of these members */
535         LASSERT(tc->tc_nthrs_max != 0);
536
537         if (tc->tc_nthrs_user != 0) {
538                 /* In case there is a reason to test a service with many
539                  * threads, we give a less strict check here, it can
540                  * be up to 8 * nthrs_max */
541                 total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
542                 nthrs = total / svc->srv_ncpts;
543                 init  = max(init, nthrs);
544                 goto out;
545         }
546
547         total = tc->tc_nthrs_max;
548         if (tc->tc_nthrs_base == 0) {
549                 /* don't care about the base number of threads per partition;
550                  * this is mostly for non-affinity services */
551                 nthrs = total / svc->srv_ncpts;
552                 goto out;
553         }
554
555         nthrs = tc->tc_nthrs_base;
556         if (svc->srv_ncpts == 1) {
557                 int     i;
558
559                 /* NB: Increase the base number if it's a single partition
560                  * and the total number of cores/HTs is at least 4.
561                  * The result will always be < 2 * nthrs_base */
562                 weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
563                 for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
564                             (tc->tc_nthrs_base >> i) != 0; i++)
565                         nthrs += tc->tc_nthrs_base >> i;
566         }
567
568         if (tc->tc_thr_factor != 0) {
569                 int       factor = tc->tc_thr_factor;
570                 const int fade = 4;
571
572                 /*
573                  * The user wants to increase the number of threads for
574                  * each CPU core/HT; most likely the factor is larger than
575                  * one thread per core because service threads are expected
576                  * to block on locks or wait for IO.
577                  */
578                 /*
579                  * Amdahl's law says that adding processors doesn't give
580                  * a linear increase in parallelism, so it makes no sense
581                  * to have too many threads no matter how many cores/HTs
582                  * there are.
583                  */
584                 if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
585                         /* depress thread factor for hyper-thread */
586                         factor = factor - (factor >> 1) + (factor >> 3);
587                 }
588
589                 weight = cfs_cpt_weight(svc->srv_cptable, 0);
590                 LASSERT(weight > 0);
591
592                 for (; factor > 0 && weight > 0; factor--, weight -= fade)
593                         nthrs += min(weight, fade) * factor;
594         }
595
596         if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
597                 nthrs = max(tc->tc_nthrs_base,
598                             tc->tc_nthrs_max / svc->srv_ncpts);
599         }
600  out:
601         nthrs = max(nthrs, tc->tc_nthrs_init);
602         svc->srv_nthrs_cpt_limit = nthrs;
603         svc->srv_nthrs_cpt_init = init;
604
605         if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
606                 CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
607                        "than the given soft limit (%d)\n",
608                        svc->srv_name, nthrs * svc->srv_ncpts,
609                        tc->tc_nthrs_max);
610         }
611 #endif
612 }
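/*
 * A worked example of the estimate above, with assumed (hypothetical)
 * inputs: one partition (srv_ncpts == 1), tc_nthrs_base = 8,
 * tc_nthrs_max = 64, tc_thr_factor = 4, and 16 cores/HTs with 2 HT
 * siblings per core:
 *
 *   single-partition base boost:  8 + (8 >> 1) + (8 >> 2) + (8 >> 3) = 15
 *   HT-depressed factor:          4 - (4 >> 1) + (4 >> 3)            = 2
 *   factor loop (fade = 4):       15 + min(16, 4)*2 + min(12, 4)*1   = 27
 *
 * 27 * srv_ncpts <= tc_nthrs_max, so no clamping occurs and
 * srv_nthrs_cpt_limit becomes max(27, tc_nthrs_init).
 */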
613
614 /**
615  * Initialize percpt data for a service
616  */
617 static int
618 ptlrpc_service_part_init(struct ptlrpc_service *svc,
619                          struct ptlrpc_service_part *svcpt, int cpt)
620 {
621         struct ptlrpc_at_array  *array;
622         int                     size;
623         int                     index;
624         int                     rc;
625
626         svcpt->scp_cpt = cpt;
627         CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
628
629         /* rqbd and incoming request queue */
630         spin_lock_init(&svcpt->scp_lock);
631         CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
632         CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
633         CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
634         init_waitqueue_head(&svcpt->scp_waitq);
635         /* history request & rqbd list */
636         CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
637         CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
638
639         /* active requests and hp requests */
640         spin_lock_init(&svcpt->scp_req_lock);
641
642         /* reply states */
643         spin_lock_init(&svcpt->scp_rep_lock);
644         CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
645 #ifndef __KERNEL__
646         CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
647 #endif
648         CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
649         init_waitqueue_head(&svcpt->scp_rep_waitq);
650         cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
651
652         /* adaptive timeout */
653         spin_lock_init(&svcpt->scp_at_lock);
654         array = &svcpt->scp_at_array;
655
656         size = at_est2timeout(at_max);
657         array->paa_size     = size;
658         array->paa_count    = 0;
659         array->paa_deadline = -1;
660
661         /* allocate memory for scp_at_array (ptlrpc_at_array) */
662         OBD_CPT_ALLOC(array->paa_reqs_array,
663                       svc->srv_cptable, cpt, sizeof(cfs_list_t) * size);
664         if (array->paa_reqs_array == NULL)
665                 return -ENOMEM;
666
667         for (index = 0; index < size; index++)
668                 CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
669
670         OBD_CPT_ALLOC(array->paa_reqs_count,
671                       svc->srv_cptable, cpt, sizeof(__u32) * size);
672         if (array->paa_reqs_count == NULL)
673                 goto failed;
674
675         cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
676         /* At SOW, service time should be quick; 10s seems generous. If client
677          * timeout is less than this, we'll be sending an early reply. */
678         at_init(&svcpt->scp_at_estimate, 10, 0);
679
680         /* assign this before calling ptlrpc_grow_req_bufs */
681         svcpt->scp_service = svc;
682         /* Now allocate the request buffers, but don't post them now */
683         rc = ptlrpc_grow_req_bufs(svcpt, 0);
684         /* We shouldn't be under memory pressure at startup, so
685          * fail if we can't allocate all our buffers at this time. */
686         if (rc != 0)
687                 goto failed;
688
689         return 0;
690
691  failed:
692         if (array->paa_reqs_count != NULL) {
693                 OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
694                 array->paa_reqs_count = NULL;
695         }
696
697         if (array->paa_reqs_array != NULL) {
698                 OBD_FREE(array->paa_reqs_array,
699                          sizeof(cfs_list_t) * array->paa_size);
700                 array->paa_reqs_array = NULL;
701         }
702
703         return -ENOMEM;
704 }
705
706 /**
707  * Initialize service on a given portal.
708  * This includes starting service threads, allocating and posting rqbds, and
709  * so on.
710  */
711 struct ptlrpc_service *
712 ptlrpc_register_service(struct ptlrpc_service_conf *conf,
713                         cfs_proc_dir_entry_t *proc_entry)
714 {
715         struct ptlrpc_service_cpt_conf  *cconf = &conf->psc_cpt;
716         struct ptlrpc_service           *service;
717         struct ptlrpc_service_part      *svcpt;
718         struct cfs_cpt_table            *cptable;
719         __u32                           *cpts = NULL;
720         int                             ncpts;
721         int                             cpt;
722         int                             rc;
723         int                             i;
724         ENTRY;
725
726         LASSERT(conf->psc_buf.bc_nbufs > 0);
727         LASSERT(conf->psc_buf.bc_buf_size >=
728                 conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
729         LASSERT(conf->psc_thr.tc_ctx_tags != 0);
730
731         cptable = cconf->cc_cptable;
732         if (cptable == NULL)
733                 cptable = cfs_cpt_table;
734
735         if (!conf->psc_thr.tc_cpu_affinity) {
736                 ncpts = 1;
737         } else {
738                 ncpts = cfs_cpt_number(cptable);
739                 if (cconf->cc_pattern != NULL) {
740                         struct cfs_expr_list    *el;
741
742                         rc = cfs_expr_list_parse(cconf->cc_pattern,
743                                                  strlen(cconf->cc_pattern),
744                                                  0, ncpts - 1, &el);
745                         if (rc != 0) {
746                                 CERROR("%s: invalid CPT pattern string: %s\n",
747                                        conf->psc_name, cconf->cc_pattern);
748                                 RETURN(ERR_PTR(-EINVAL));
749                         }
750
751                         rc = cfs_expr_list_values(el, ncpts, &cpts);
752                         cfs_expr_list_free(el);
753                         if (rc <= 0) {
754                                 CERROR("%s: failed to parse CPT array %s: %d\n",
755                                        conf->psc_name, cconf->cc_pattern, rc);
756                                 if (cpts != NULL)
757                                         OBD_FREE(cpts, sizeof(*cpts) * ncpts);
758                                 RETURN(ERR_PTR(rc < 0 ? rc : -EINVAL));
759                         }
760                         ncpts = rc;
761                 }
762         }
763
764         OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts]));
765         if (service == NULL) {
766                 if (cpts != NULL)
767                         OBD_FREE(cpts, sizeof(*cpts) * ncpts);
768                 RETURN(ERR_PTR(-ENOMEM));
769         }
770
771         service->srv_cptable            = cptable;
772         service->srv_cpts               = cpts;
773         service->srv_ncpts              = ncpts;
774
775         service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
776         while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
777                 service->srv_cpt_bits++;
778
779         /* public members */
780         spin_lock_init(&service->srv_lock);
781         service->srv_name               = conf->psc_name;
782         service->srv_watchdog_factor    = conf->psc_watchdog_factor;
783         CFS_INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
784
785         /* buffer configuration */
786         service->srv_nbuf_per_group     = test_req_buffer_pressure ?
787                                           1 : conf->psc_buf.bc_nbufs;
788         service->srv_max_req_size       = conf->psc_buf.bc_req_max_size +
789                                           SPTLRPC_MAX_PAYLOAD;
790         service->srv_buf_size           = conf->psc_buf.bc_buf_size;
791         service->srv_rep_portal         = conf->psc_buf.bc_rep_portal;
792         service->srv_req_portal         = conf->psc_buf.bc_req_portal;
793
794         /* Increase max reply size to next power of two */
795         service->srv_max_reply_size = 1;
796         while (service->srv_max_reply_size <
797                conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
798                 service->srv_max_reply_size <<= 1;
799
800         service->srv_thread_name        = conf->psc_thr.tc_thr_name;
801         service->srv_ctx_tags           = conf->psc_thr.tc_ctx_tags;
802         service->srv_hpreq_ratio        = PTLRPC_SVC_HP_RATIO;
803         service->srv_ops                = conf->psc_ops;
804
805         for (i = 0; i < ncpts; i++) {
806                 if (!conf->psc_thr.tc_cpu_affinity)
807                         cpt = CFS_CPT_ANY;
808                 else
809                         cpt = cpts != NULL ? cpts[i] : i;
810
811                 OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt));
812                 if (svcpt == NULL)
813                         GOTO(failed, rc = -ENOMEM);
814
815                 service->srv_parts[i] = svcpt;
816                 rc = ptlrpc_service_part_init(service, svcpt, cpt);
817                 if (rc != 0)
818                         GOTO(failed, rc);
819         }
820
821         ptlrpc_server_nthreads_check(service, conf);
822
823         rc = LNetSetLazyPortal(service->srv_req_portal);
824         LASSERT(rc == 0);
825
826         mutex_lock(&ptlrpc_all_services_mutex);
827         cfs_list_add(&service->srv_list, &ptlrpc_all_services);
828         mutex_unlock(&ptlrpc_all_services_mutex);
829
830         if (proc_entry != NULL)
831                 ptlrpc_lprocfs_register_service(proc_entry, service);
832
833         rc = ptlrpc_service_nrs_setup(service);
834         if (rc != 0)
835                 GOTO(failed, rc);
836
837         CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
838                service->srv_name, service->srv_req_portal);
839
840 #ifdef __KERNEL__
841         rc = ptlrpc_start_threads(service);
842         if (rc != 0) {
843                 CERROR("Failed to start threads for service %s: %d\n",
844                        service->srv_name, rc);
845                 GOTO(failed, rc);
846         }
847 #endif
848
849         RETURN(service);
850 failed:
851         ptlrpc_unregister_service(service);
852         RETURN(ERR_PTR(rc));
853 }
854 EXPORT_SYMBOL(ptlrpc_register_service);
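/*
 * A caller registers a service by filling in a ptlrpc_service_conf and
 * passing it here, as the MDS/OST/LDLM services do elsewhere in the tree.
 * A hedged, minimal sketch: the name, portals, thread counts and context
 * tag below are hypothetical placeholders, and only fields referenced in
 * this file are shown.  Note the invariants asserted above: bc_nbufs > 0,
 * bc_buf_size >= bc_req_max_size + SPTLRPC_MAX_PAYLOAD, tc_ctx_tags != 0,
 * and tc_nthrs_max != 0.
 */
#if 0
	static struct ptlrpc_service_conf conf = {
		.psc_name		= "example_svc",
		.psc_watchdog_factor	= 1,
		.psc_buf = {
			.bc_nbufs		= 64,
			.bc_req_max_size	= 2048,
			.bc_rep_max_size	= 2048,
			.bc_buf_size		= 2048 + SPTLRPC_MAX_PAYLOAD,
			.bc_req_portal		= EXAMPLE_REQUEST_PORTAL,
			.bc_rep_portal		= EXAMPLE_REPLY_PORTAL,
		},
		.psc_thr = {
			.tc_thr_name		= "ll_example",
			.tc_nthrs_init		= 2,
			.tc_nthrs_max		= 32,
			.tc_ctx_tags		= LCT_MD_THREAD,
		},
	};
	struct ptlrpc_service *svc;

	svc = ptlrpc_register_service(&conf, NULL /* no procfs entry */);
	if (IS_ERR(svc))
		return PTR_ERR(svc);
#endif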
855
856 /**
857  * To actually free the request, this must be called without holding svc_lock.
858  * Note it's the caller's responsibility to unlink req->rq_list.
859  */
860 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
861 {
862         LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
863         LASSERT(cfs_list_empty(&req->rq_timed_list));
864
865          /* DEBUG_REQ() assumes the reply state of a request with a valid
866           * ref will not be destroyed until that reference is dropped. */
867         ptlrpc_req_drop_rs(req);
868
869         sptlrpc_svc_ctx_decref(req);
870
871         if (req != &req->rq_rqbd->rqbd_req) {
872                 /* NB request buffers use an embedded
873                  * req if the incoming req unlinked the
874                  * MD; this isn't one of them! */
875                 ptlrpc_request_cache_free(req);
876         }
877 }
878
879 /**
880  * Drop a reference to the request. If it reaches 0, we either
881  * put it into the history list or free it immediately.
882  */
883 void ptlrpc_server_drop_request(struct ptlrpc_request *req)
884 {
885         struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
886         struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
887         struct ptlrpc_service             *svc = svcpt->scp_service;
888         int                                refcount;
889         cfs_list_t                        *tmp;
890         cfs_list_t                        *nxt;
891
892         if (!cfs_atomic_dec_and_test(&req->rq_refcount))
893                 return;
894
895         if (req->rq_at_linked) {
896                 spin_lock(&svcpt->scp_at_lock);
897                 /* recheck with lock, in case it's unlinked by
898                  * ptlrpc_at_check_timed() */
899                 if (likely(req->rq_at_linked))
900                         ptlrpc_at_remove_timed(req);
901                 spin_unlock(&svcpt->scp_at_lock);
902         }
903
904         LASSERT(cfs_list_empty(&req->rq_timed_list));
905
906         /* finalize request */
907         if (req->rq_export) {
908                 class_export_put(req->rq_export);
909                 req->rq_export = NULL;
910         }
911
912         spin_lock(&svcpt->scp_lock);
913
914         cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
915
916         refcount = --(rqbd->rqbd_refcount);
917         if (refcount == 0) {
918                 /* request buffer is now idle: add to history */
919                 cfs_list_del(&rqbd->rqbd_list);
920
921                 cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
922                 svcpt->scp_hist_nrqbds++;
923
924                 /* cull some history?
925                  * I expect only about 1 or 2 rqbds need to be recycled here */
926                 while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
927                         rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next,
928                                               struct ptlrpc_request_buffer_desc,
929                                               rqbd_list);
930
931                         cfs_list_del(&rqbd->rqbd_list);
932                         svcpt->scp_hist_nrqbds--;
933
934                         /* remove rqbd's reqs from svc's req history while
935                          * I've got the service lock */
936                         cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
937                                 req = cfs_list_entry(tmp, struct ptlrpc_request,
938                                                      rq_list);
939                                 /* Track the highest culled req seq */
940                                 if (req->rq_history_seq >
941                                     svcpt->scp_hist_seq_culled) {
942                                         svcpt->scp_hist_seq_culled =
943                                                 req->rq_history_seq;
944                                 }
945                                 cfs_list_del(&req->rq_history_list);
946                         }
947
948                         spin_unlock(&svcpt->scp_lock);
949
950                         cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
951                                 req = cfs_list_entry(rqbd->rqbd_reqs.next,
952                                                      struct ptlrpc_request,
953                                                      rq_list);
954                                 cfs_list_del(&req->rq_list);
955                                 ptlrpc_server_free_request(req);
956                         }
957
958                         spin_lock(&svcpt->scp_lock);
959                         /*
960                          * now all reqs, including the embedded req, have been
961                          * disposed of; schedule the request buffer for re-use.
962                          */
963                         LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
964                                 0);
965                         cfs_list_add_tail(&rqbd->rqbd_list,
966                                           &svcpt->scp_rqbd_idle);
967                 }
968
969                 spin_unlock(&svcpt->scp_lock);
970         } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
971                 /* If we are low on memory, we are not interested in history */
972                 cfs_list_del(&req->rq_list);
973                 cfs_list_del_init(&req->rq_history_list);
974
975                 /* Track the highest culled req seq */
976                 if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
977                         svcpt->scp_hist_seq_culled = req->rq_history_seq;
978
979                 spin_unlock(&svcpt->scp_lock);
980
981                 ptlrpc_server_free_request(req);
982         } else {
983                 spin_unlock(&svcpt->scp_lock);
984         }
985 }
986
987 /** Change request export and move hp request from old export to new */
988 void ptlrpc_request_change_export(struct ptlrpc_request *req,
989                                   struct obd_export *export)
990 {
991         if (req->rq_export != NULL) {
992                 if (!cfs_list_empty(&req->rq_exp_list)) {
993                         /* remove rq_exp_list from last export */
994                         spin_lock_bh(&req->rq_export->exp_rpc_lock);
995                         cfs_list_del_init(&req->rq_exp_list);
996                         spin_unlock_bh(&req->rq_export->exp_rpc_lock);
997
998                         /* export has one reference already, so it's safe to
999                          * add req to export queue here and get another
1000                          * reference for request later */
1001                         spin_lock_bh(&export->exp_rpc_lock);
1002                         cfs_list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
1003                         spin_unlock_bh(&export->exp_rpc_lock);
1004                 }
1005                 class_export_rpc_dec(req->rq_export);
1006                 class_export_put(req->rq_export);
1007         }
1008
1009         /* request takes one export refcount */
1010         req->rq_export = class_export_get(export);
1011         class_export_rpc_inc(export);
1012
1013         return;
1014 }
1015
1016 /**
1017  * To finish a request: stop sending more early replies, and release
1018  * the request.
1019  */
1020 static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
1021                                          struct ptlrpc_request *req)
1022 {
1023         ptlrpc_server_hpreq_fini(req);
1024
1025         if (req->rq_session.lc_thread != NULL) {
1026                 lu_context_exit(&req->rq_session);
1027                 lu_context_fini(&req->rq_session);
1028         }
1029
1030         ptlrpc_server_drop_request(req);
1031 }
1032
1033 /**
1034  * To finish an active request: stop sending more early replies, and release
1035  * the request. Should be called after we have finished handling the request.
1036  */
1037 static void ptlrpc_server_finish_active_request(
1038                                         struct ptlrpc_service_part *svcpt,
1039                                         struct ptlrpc_request *req)
1040 {
1041         spin_lock(&svcpt->scp_req_lock);
1042         ptlrpc_nrs_req_stop_nolock(req);
1043         svcpt->scp_nreqs_active--;
1044         if (req->rq_hp)
1045                 svcpt->scp_nhreqs_active--;
1046         spin_unlock(&svcpt->scp_req_lock);
1047
1048         ptlrpc_nrs_req_finalize(req);
1049
1050         if (req->rq_export != NULL)
1051                 class_export_rpc_dec(req->rq_export);
1052
1053         ptlrpc_server_finish_request(svcpt, req);
1054 }
1055
1056 /**
1057  * This function makes sure dead exports are evicted in a timely manner.
1058  * It is only called when some export receives a message (i.e.,
1059  * the network is up).
1060  */
1061 static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
1062 {
1063         struct obd_export *oldest_exp;
1064         time_t oldest_time, new_time;
1065
1066         ENTRY;
1067
1068         LASSERT(exp);
1069
1070         /* Compensate for slow machines, etc, by faking our request time
1071            into the future.  Although this can break the strict time-ordering
1072            of the list, we can be really lazy here - we don't have to evict
1073            at the exact right moment.  Eventually, all silent exports
1074            will make it to the top of the list. */
1075
1076         /* Pay no attention to renewals of 1 second or less. */
1077         new_time = cfs_time_current_sec() + extra_delay;
1078         if (exp->exp_last_request_time + 1 /*second */ >= new_time)
1079                 RETURN_EXIT;
1080
1081         exp->exp_last_request_time = new_time;
1082         CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
1083                exp->exp_client_uuid.uuid,
1084                exp->exp_last_request_time, exp);
1085
1086         /* exports may get disconnected from the chain even though the
1087            export has references, so we must keep the spin lock while
1088            manipulating the lists */
1089         spin_lock(&exp->exp_obd->obd_dev_lock);
1090
1091         if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
1092                 /* this one is not timed */
1093                 spin_unlock(&exp->exp_obd->obd_dev_lock);
1094                 RETURN_EXIT;
1095         }
1096
1097         cfs_list_move_tail(&exp->exp_obd_chain_timed,
1098                            &exp->exp_obd->obd_exports_timed);
1099
1100         oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
1101                                     struct obd_export, exp_obd_chain_timed);
1102         oldest_time = oldest_exp->exp_last_request_time;
1103         spin_unlock(&exp->exp_obd->obd_dev_lock);
1104
1105         if (exp->exp_obd->obd_recovering) {
1106                 /* be nice to everyone during recovery */
1107                 EXIT;
1108                 return;
1109         }
1110
1111         /* Note - racing to start/reset the obd_eviction timer is safe */
1112         if (exp->exp_obd->obd_eviction_timer == 0) {
1113                 /* Check if the oldest entry is expired. */
1114                 if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
1115                                               extra_delay)) {
1116                         /* We need a second timer, in case the net was down and
1117                          * it just came back. Since the pinger may skip every
1118                          * other PING_INTERVAL (see note in ptlrpc_pinger_main),
1119                          * we better wait for 3. */
1120                         exp->exp_obd->obd_eviction_timer =
1121                                 cfs_time_current_sec() + 3 * PING_INTERVAL;
1122                         CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
1123                                exp->exp_obd->obd_name, 
1124                                obd_export_nid2str(oldest_exp), oldest_time);
1125                 }
1126         } else {
1127                 if (cfs_time_current_sec() >
1128                     (exp->exp_obd->obd_eviction_timer + extra_delay)) {
1129                         /* The evictor won't evict anyone who we've heard from
1130                          * recently, so we don't have to check before we start
1131                          * it. */
1132                         if (!ping_evictor_wake(exp))
1133                                 exp->exp_obd->obd_eviction_timer = 0;
1134                 }
1135         }
1136
1137         EXIT;
1138 }
1139
1140 /**
1141  * Sanity check request \a req.
1142  * Return 0 if all is ok, error code otherwise.
1143  */
1144 static int ptlrpc_check_req(struct ptlrpc_request *req)
1145 {
1146         int rc = 0;
1147
1148         if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
1149                      req->rq_export->exp_conn_cnt)) {
1150                 DEBUG_REQ(D_RPCTRACE, req,
1151                           "DROPPING req from old connection %d < %d",
1152                           lustre_msg_get_conn_cnt(req->rq_reqmsg),
1153                           req->rq_export->exp_conn_cnt);
1154                 return -EEXIST;
1155         }
1156         if (unlikely(req->rq_export->exp_obd &&
1157                      req->rq_export->exp_obd->obd_fail)) {
1158                 /* Failing over; don't handle any more reqs, send an
1159                  * error response instead. */
1160                 CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
1161                        req, req->rq_export->exp_obd->obd_name);
1162                 rc = -ENODEV;
1163         } else if (lustre_msg_get_flags(req->rq_reqmsg) &
1164                    (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
1165                    !(req->rq_export->exp_obd->obd_recovering)) {
1166                         DEBUG_REQ(D_ERROR, req,
1167                                   "Invalid replay without recovery");
1168                         class_fail_export(req->rq_export);
1169                         rc = -ENODEV;
1170         } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
1171                    !(req->rq_export->exp_obd->obd_recovering)) {
1172                         DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
1173                                   LPU64" without recovery",
1174                                   lustre_msg_get_transno(req->rq_reqmsg));
1175                         class_fail_export(req->rq_export);
1176                         rc = -ENODEV;
1177         }
1178
1179         if (unlikely(rc < 0)) {
1180                 req->rq_status = rc;
1181                 ptlrpc_error(req);
1182         }
1183         return rc;
1184 }
1185
1186 static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
1187 {
1188         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1189         __s32 next;
1190
1191         if (array->paa_count == 0) {
1192                 cfs_timer_disarm(&svcpt->scp_at_timer);
1193                 return;
1194         }
1195
1196         /* Set timer for closest deadline */
1197         next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
1198                        at_early_margin);
1199         if (next <= 0) {
1200                 ptlrpc_at_timer((unsigned long)svcpt);
1201         } else {
1202                 cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
1203                 CDEBUG(D_INFO, "armed %s at %+ds\n",
1204                        svcpt->scp_service->srv_name, next);
1205         }
1206 }
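/*
 * The arming arithmetic above fires the timer at_early_margin seconds
 * before the earliest tracked deadline.  With assumed numbers: if
 * paa_deadline is 12 seconds away and at_early_margin = 5, then
 * next = 12 - 5 = 7 and the timer is armed 7 seconds out; if the margin
 * has already been reached (next <= 0), the check runs immediately via
 * ptlrpc_at_timer().
 */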
1207
1208 /* Add rpc to early reply check list */
1209 static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
1210 {
1211         struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
1212         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1213         struct ptlrpc_request *rq = NULL;
1214         __u32 index;
1215
1216         if (AT_OFF)
1217                 return 0;
1218
1219         if (req->rq_no_reply)
1220                 return 0;
1221
1222         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
1223                 return -ENOSYS;
1224
1225         spin_lock(&svcpt->scp_at_lock);
1226         LASSERT(cfs_list_empty(&req->rq_timed_list));
1227
1228         index = (unsigned long)req->rq_deadline % array->paa_size;
1229         if (array->paa_reqs_count[index] > 0) {
1230                 /* latest rpcs will have the latest deadlines in the list,
1231                  * so search backward. */
1232                 cfs_list_for_each_entry_reverse(rq,
1233                                                 &array->paa_reqs_array[index],
1234                                                 rq_timed_list) {
1235                         if (req->rq_deadline >= rq->rq_deadline) {
1236                                 cfs_list_add(&req->rq_timed_list,
1237                                              &rq->rq_timed_list);
1238                                 break;
1239                         }
1240                 }
1241         }
1242
1243         /* Add the request at the head of the list */
1244         if (cfs_list_empty(&req->rq_timed_list))
1245                 cfs_list_add(&req->rq_timed_list,
1246                              &array->paa_reqs_array[index]);
1247
1248         spin_lock(&req->rq_lock);
1249         req->rq_at_linked = 1;
1250         spin_unlock(&req->rq_lock);
1251         req->rq_at_index = index;
1252         array->paa_reqs_count[index]++;
1253         array->paa_count++;
1254         if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
1255                 array->paa_deadline = req->rq_deadline;
1256                 ptlrpc_at_set_timer(svcpt);
1257         }
1258         spin_unlock(&svcpt->scp_at_lock);
1259
1260         return 0;
1261 }
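/*
 * The timed list above is a ring of paa_size buckets indexed by deadline
 * modulo paa_size, so deadlines that are congruent mod paa_size share a
 * bucket, and each bucket is kept sorted by the backward search.  With
 * assumed numbers (the real size comes from at_est2timeout(at_max)):
 */
#if 0
	__u32 size = 625;		/* stands in for array->paa_size */
	time_t deadline = 1000;		/* absolute deadline, in seconds */
	__u32 index = (unsigned long)deadline % size;	/* 1000 % 625 == 375 */
#endif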
1262
1263 static void
1264 ptlrpc_at_remove_timed(struct ptlrpc_request *req)
1265 {
1266         struct ptlrpc_at_array *array;
1267
1268         array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
1269
1270         /* NB: must be called holding svcpt::scp_at_lock */
1271         LASSERT(!cfs_list_empty(&req->rq_timed_list));
1272         cfs_list_del_init(&req->rq_timed_list);
1273
1274         spin_lock(&req->rq_lock);
1275         req->rq_at_linked = 0;
1276         spin_unlock(&req->rq_lock);
1277
1278         array->paa_reqs_count[req->rq_at_index]--;
1279         array->paa_count--;
1280 }
1281
1282 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
1283 {
1284         struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
1285         struct ptlrpc_request *reqcopy;
1286         struct lustre_msg *reqmsg;
1287         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
1288         time_t newdl;
1289         int rc;
1290         ENTRY;
1291
1292         /* deadline is when the client expects us to reply, margin is the
1293            difference between clients' and servers' expectations */
1294         DEBUG_REQ(D_ADAPTTO, req,
1295                   "%ssending early reply (deadline %+lds, margin %+lds) for "
1296                   "%d+%d", AT_OFF ? "AT off - not " : "",
1297                   olddl, olddl - at_get(&svcpt->scp_at_estimate),
1298                   at_get(&svcpt->scp_at_estimate), at_extra);
1299
1300         if (AT_OFF)
1301                 RETURN(0);
1302
1303         if (olddl < 0) {
1304                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
1305                           "not sending early reply. Consider increasing "
1306                           "at_early_margin (%d)?", olddl, at_early_margin);
1307
1308                 /* Return an error so we're not re-added to the timed list. */
1309                 RETURN(-ETIMEDOUT);
1310         }
1311
1312         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0) {
1313                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
1314                           "but no AT support");
1315                 RETURN(-ENOSYS);
1316         }
1317
1318         if (req->rq_export &&
1319             lustre_msg_get_flags(req->rq_reqmsg) &
1320             (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
1321                 /* During recovery, we don't want to send too many early
1322                  * replies, but on the other hand we want to make sure the
1323                  * client has enough time to resend if the rpc is lost. So
1324                  * during the recovery period send at least 4 early replies,
1325                  * spacing them every at_extra if we can. at_estimate should
1326                  * always equal this fixed value during recovery. */
1327                 at_measured(&svcpt->scp_at_estimate, min(at_extra,
1328                             req->rq_export->exp_obd->obd_recovery_timeout / 4));
1329         } else {
1330                 /* Fake our processing time into the future to ask the clients
1331                  * for some extra amount of time */
1332                 at_measured(&svcpt->scp_at_estimate, at_extra +
1333                             cfs_time_current_sec() -
1334                             req->rq_arrival_time.tv_sec);
1335
1336                 /* Check to see if we've actually increased the deadline -
1337                  * we may be past adaptive_max */
1338                 if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
1339                     at_get(&svcpt->scp_at_estimate)) {
1340                         DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
1341                                   "(%ld/%ld), not sending early reply\n",
1342                                   olddl, req->rq_arrival_time.tv_sec +
1343                                   at_get(&svcpt->scp_at_estimate) -
1344                                   cfs_time_current_sec());
1345                         RETURN(-ETIMEDOUT);
1346                 }
1347         }
1348         newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
1349
1350         reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
1351         if (reqcopy == NULL)
1352                 RETURN(-ENOMEM);
1353         OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
1354         if (!reqmsg)
1355                 GOTO(out_free, rc = -ENOMEM);
1356
1357         *reqcopy = *req;
1358         reqcopy->rq_reply_state = NULL;
1359         reqcopy->rq_rep_swab_mask = 0;
1360         reqcopy->rq_pack_bulk = 0;
1361         reqcopy->rq_pack_udesc = 0;
1362         reqcopy->rq_packed_final = 0;
1363         sptlrpc_svc_ctx_addref(reqcopy);
1364         /* We only need the reqmsg for the magic */
1365         reqcopy->rq_reqmsg = reqmsg;
1366         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
1367
1368         LASSERT(cfs_atomic_read(&req->rq_refcount));
1369         /** if it is last refcount then early reply isn't needed */
1370         if (cfs_atomic_read(&req->rq_refcount) == 1) {
1371                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
1372                           "abort sending early reply\n");
1373                 GOTO(out, rc = -EINVAL);
1374         }
1375
1376         /* Connection ref */
1377         reqcopy->rq_export = class_conn2export(
1378                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
1379         if (reqcopy->rq_export == NULL)
1380                 GOTO(out, rc = -ENODEV);
1381
1382         /* RPC ref */
1383         class_export_rpc_inc(reqcopy->rq_export);
1384         if (reqcopy->rq_export->exp_obd &&
1385             reqcopy->rq_export->exp_obd->obd_fail)
1386                 GOTO(out_put, rc = -ENODEV);
1387
1388         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1389         if (rc)
1390                 GOTO(out_put, rc);
1391
1392         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1393
1394         if (!rc) {
1395                 /* Adjust our own deadline to what we told the client */
1396                 req->rq_deadline = newdl;
1397                 req->rq_early_count++; /* number sent, server side */
1398         } else {
1399                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1400         }
1401
1402         /* Free the (early) reply state from lustre_pack_reply.
1403            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1404         ptlrpc_req_drop_rs(reqcopy);
1405
1406 out_put:
1407         class_export_rpc_dec(reqcopy->rq_export);
1408         class_export_put(reqcopy->rq_export);
1409 out:
1410         sptlrpc_svc_ctx_decref(reqcopy);
1411         OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
1412 out_free:
1413         ptlrpc_request_cache_free(reqcopy);
1414         RETURN(rc);
1415 }
1416
1417 /* Send early replies to every request expiring within at_early_margin,
1418    asking for at_extra more time */
1419 static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
1420 {
1421         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1422         struct ptlrpc_request *rq, *n;
1423         cfs_list_t work_list;
1424         __u32  index, count;
1425         time_t deadline;
1426         time_t now = cfs_time_current_sec();
1427         cfs_duration_t delay;
1428         int first, counter = 0;
1429         ENTRY;
1430
1431         spin_lock(&svcpt->scp_at_lock);
1432         if (svcpt->scp_at_check == 0) {
1433                 spin_unlock(&svcpt->scp_at_lock);
1434                 RETURN(0);
1435         }
1436         delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
1437         svcpt->scp_at_check = 0;
1438
1439         if (array->paa_count == 0) {
1440                 spin_unlock(&svcpt->scp_at_lock);
1441                 RETURN(0);
1442         }
1443
1444         /* The timer went off, but maybe the nearest rpc already completed. */
1445         first = array->paa_deadline - now;
1446         if (first > at_early_margin) {
1447                 /* We've still got plenty of time.  Reset the timer. */
1448                 ptlrpc_at_set_timer(svcpt);
1449                 spin_unlock(&svcpt->scp_at_lock);
1450                 RETURN(0);
1451         }
1452
1453         /* We're close to a timeout, and we don't know how much longer the
1454            server will take. Send early replies to everyone expiring soon. */
1455         CFS_INIT_LIST_HEAD(&work_list);
1456         deadline = -1;
1457         index = (unsigned long)array->paa_deadline % array->paa_size;
1458         count = array->paa_count;
1459         while (count > 0) {
1460                 count -= array->paa_reqs_count[index];
1461                 cfs_list_for_each_entry_safe(rq, n,
1462                                              &array->paa_reqs_array[index],
1463                                              rq_timed_list) {
1464                         if (rq->rq_deadline > now + at_early_margin) {
1465                                 /* update the earliest deadline */
1466                                 if (deadline == -1 ||
1467                                     rq->rq_deadline < deadline)
1468                                         deadline = rq->rq_deadline;
1469                                 break;
1470                         }
1471
1472                         ptlrpc_at_remove_timed(rq);
1473                         /**
1474                          * ptlrpc_server_drop_request() may have already
1475                          * dropped the refcount to 0; check for this and
1476                          * don't add the entry to work_list in that case
1477                          */
1478                         if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
1479                                 cfs_list_add(&rq->rq_timed_list, &work_list);
1480                         counter++;
1481                 }
1482
1483                 if (++index >= array->paa_size)
1484                         index = 0;
1485         }
1486         array->paa_deadline = deadline;
1487         /* we have a new earliest deadline, restart the timer */
1488         ptlrpc_at_set_timer(svcpt);
1489
1490         spin_unlock(&svcpt->scp_at_lock);
1491
1492         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1493                "replies\n", first, at_extra, counter);
1494         if (first < 0) {
1495                 /* We're already past request deadlines before we even get a
1496                    chance to send early replies */
1497                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1498                               "request traffic (cpu-bound).\n",
1499                               svcpt->scp_service->srv_name);
1500                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1501                       "delay="CFS_DURATION_T"(jiff)\n",
1502                       counter, svcpt->scp_nreqs_incoming,
1503                       svcpt->scp_nreqs_active,
1504                       at_get(&svcpt->scp_at_estimate), delay);
1505         }
1506
1507         /* we took an additional refcount so entries can't be deleted from
1508          * the list; no locking is needed */
1509         while (!cfs_list_empty(&work_list)) {
1510                 rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
1511                                     rq_timed_list);
1512                 cfs_list_del_init(&rq->rq_timed_list);
1513
1514                 if (ptlrpc_at_send_early_reply(rq) == 0)
1515                         ptlrpc_at_add_timed(rq);
1516
1517                 ptlrpc_server_drop_request(rq);
1518         }
1519
1520         RETURN(1); /* return "did_something" for liblustre */
1521 }
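/*
 * Timing sketch (editorial example, hypothetical values): a request
 * arrives at t=1000 with a 30s client timeout, so rq_deadline = 1030.
 * With at_early_margin = 5 the AT timer fires around t=1025 and
 * ptlrpc_at_check_timed() moves the request onto work_list.
 * ptlrpc_at_send_early_reply() then feeds "at_extra + time already
 * spent" into at_measured(), so the new deadline becomes
 * now + at_get(&svcpt->scp_at_estimate) -- later than 1030 unless the
 * estimate is already capped at at_max, in which case -ETIMEDOUT is
 * returned and no early reply goes out.
 */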
1522
1523 /* Check if we are already handling an earlier incarnation of this request.
1524  * Called with &req->rq_export->exp_rpc_lock held */
1525 static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
1526 {
1527         struct ptlrpc_request   *tmp = NULL;
1528
1529         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
1530             (cfs_atomic_read(&req->rq_export->exp_rpc_count) == 0))
1531                 return 0;
1532
1533         /* bulk requests are aborted upon reconnect; don't try to
1534          * find a match */
1535         if (req->rq_bulk_write || req->rq_bulk_read)
1536                 return 0;
1537
1538         /* This list should not be longer than the number of requests in
1539          * flight on the client, so it is not all that long.  Also we only
1540          * hit this codepath for resent requests, which makes it even more
1541          * rarely traversed */
1542         cfs_list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
1543                                 rq_exp_list) {
1544                 /* Found a duplicate */
1545                 if (tmp->rq_xid == req->rq_xid)
1546                         goto found;
1547         }
1548         cfs_list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
1549                                 rq_exp_list) {
1550                 /* Found a duplicate */
1551                 if (tmp->rq_xid == req->rq_xid)
1552                         goto found;
1553         }
1554         return 0;
1555
1556 found:
1557         DEBUG_REQ(D_HA, req, "Found duplicate req in processing\n");
1558         DEBUG_REQ(D_HA, tmp, "Request being processed\n");
1559         return -EBUSY;
1560 }
1561
1562 /**
1563  * Put the request on the export list if it may become
1564  * a high-priority one.
1565  */
1566 static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
1567                                     struct ptlrpc_request *req)
1568 {
1569         cfs_list_t      *list;
1570         int              rc, hp = 0;
1571
1572         ENTRY;
1573
1574         if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
1575                 rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
1576                 if (rc < 0)
1577                         RETURN(rc);
1578                 LASSERT(rc == 0);
1579         }
1580         if (req->rq_export) {
1581                 if (req->rq_ops) {
1582                         /* Perform the request-specific check before the
1583                          * request is added to the exp_hp_rpcs list;
1584                          * otherwise it may hit the swab race in LU-1044. */
1585                         if (req->rq_ops->hpreq_check) {
1586                                 rc = req->rq_ops->hpreq_check(req);
1587                                 /**
1588                                  * XXX: Out of all current
1589                                  * ptlrpc_hpreq_ops::hpreq_check(), only
1590                                  * ldlm_cancel_hpreq_check() can return an
1591                                  * error code; other functions assert in
1592                                  * similar places, which seems odd.
1593                                  * What also does not seem right is that
1594                                  * handlers for those RPCs do not assert
1595                                  * on the same checks, but rather handle the
1596                                  * error cases. e.g. see ost_rw_hpreq_check(),
1597                          * ost_brw_read() and ost_brw_write().
1598                                  */
1599                                 if (rc < 0)
1600                                         RETURN(rc);
1601                                 LASSERT(rc == 0 || rc == 1);
1602                                 hp = rc;
1603                         }
1604                         list = &req->rq_export->exp_hp_rpcs;
1605                 } else {
1606                         list = &req->rq_export->exp_reg_rpcs;
1607                 }
1608
1609                 /* search for a duplicate xid and add to the list
1610                  * atomically */
1611                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1612                 rc = ptlrpc_server_check_resend_in_progress(req);
1613                 if (rc < 0) {
1614                         spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1615                         RETURN(rc);
1616                 }
1617                 cfs_list_add(&req->rq_exp_list, list);
1618                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1619         }
1620
1621         ptlrpc_nrs_req_initialize(svcpt, req, !!hp);
1622
1623         RETURN(hp);
1624 }
1625
1626 /** Remove the request from the export list. */
1627 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
1628 {
1629         ENTRY;
1630         if (req->rq_export) {
1631                 /* refresh the lock timeout so that the client has more
1632                  * room to send a lock cancel RPC. */
1633                 if (req->rq_ops && req->rq_ops->hpreq_fini)
1634                         req->rq_ops->hpreq_fini(req);
1635
1636                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1637                 cfs_list_del_init(&req->rq_exp_list);
1638                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1639         }
1640         EXIT;
1641 }
1642
1643 static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
1644 {
1645         return 1;
1646 }
1647
1648 static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = {
1649         .hpreq_check       = ptlrpc_hpreq_check,
1650 };
1651
1652 /* Hi-Priority RPC check by RPC operation code. */
1653 int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
1654 {
1655         int opc = lustre_msg_get_opc(req->rq_reqmsg);
1656
1657         /* Check the export so that only reconnects for a not yet evicted
1658          * export can become HP rpcs. */
1659         if ((req->rq_export != NULL) &&
1660             (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT))
1661                 req->rq_ops = &ptlrpc_hpreq_common;
1662
1663         return 0;
1664 }
1665 EXPORT_SYMBOL(ptlrpc_hpreq_handler);
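/*
 * Registration sketch (editorial; field values are hypothetical, see the
 * real MDS/OST service setup for working configurations): a target opts
 * in by pointing so_hpreq_handler at this function in its service
 * definition before calling ptlrpc_register_service():
 *
 *         static struct ptlrpc_service_conf conf = {
 *                 .psc_name       = "example_svc",
 *                 ...
 *                 .psc_ops        = {
 *                         .so_req_handler   = example_req_handler,
 *                         .so_hpreq_handler = ptlrpc_hpreq_handler,
 *                 },
 *         };
 *
 * With that in place, ptlrpc_server_hpreq_init() above lets OBD_PING and
 * MDS/OST connect RPCs from a not yet evicted export become HP requests.
 */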
1666
1667 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
1668                                      struct ptlrpc_request *req)
1669 {
1670         int     rc;
1671         ENTRY;
1672
1673         rc = ptlrpc_server_hpreq_init(svcpt, req);
1674         if (rc < 0)
1675                 RETURN(rc);
1676
1677         ptlrpc_nrs_req_add(svcpt, req, !!rc);
1678
1679         RETURN(0);
1680 }
1681
1682 /**
1683  * Check whether handling a high-priority request is allowed.
1684  * This can be called without any lock, but the caller must hold
1685  * ptlrpc_service_part::scp_req_lock to get a reliable result
1686  */
1687 static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
1688                                      bool force)
1689 {
1690         int running = svcpt->scp_nthrs_running;
1691
1692         if (!nrs_svcpt_has_hp(svcpt))
1693                 return false;
1694
1695         if (force)
1696                 return true;
1697
1698         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1699                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1700                 /* leave just 1 thread for normal RPCs */
1701                 running = PTLRPC_NTHRS_INIT;
1702                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1703                         running += 1;
1704         }
1705
1706         if (svcpt->scp_nreqs_active >= running - 1)
1707                 return false;
1708
1709         if (svcpt->scp_nhreqs_active == 0)
1710                 return true;
1711
1712         return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
1713                svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
1714 }
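/*
 * Worked example (editorial, hypothetical numbers): with 8 running
 * threads, HP requests are refused once scp_nreqs_active reaches 7, so
 * that one thread always stays free.  Below that, an HP request is
 * fetched immediately if none is currently active or no normal request
 * is pending; otherwise HP requests keep winning only while
 * scp_hreq_count < srv_hpreq_ratio, which bounds how long normal
 * requests can be starved.
 */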
1715
1716 static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
1717                                        bool force)
1718 {
1719         return ptlrpc_server_allow_high(svcpt, force) &&
1720                ptlrpc_nrs_req_pending_nolock(svcpt, true);
1721 }
1722
1723 /**
1724  * Only allow normal priority requests on a service that has a high-priority
1725  * queue if forced (i.e. cleanup), if there are other high priority requests
1726  * already being processed (i.e. those threads can service more high-priority
1727  * requests), or if there are enough idle threads that a later thread can do
1728  * a high priority request.
1729  * This can be called without any lock, but the caller must hold
1730  * ptlrpc_service_part::scp_req_lock to get a reliable result
1731  */
1732 static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
1733                                        bool force)
1734 {
1735         int running = svcpt->scp_nthrs_running;
1736 #ifndef __KERNEL__
1737         if (1) /* always allow to handle normal request for liblustre */
1738                 return true;
1739 #endif
1740         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1741                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1742                 /* leave just 1 thread for normal RPCs */
1743                 running = PTLRPC_NTHRS_INIT;
1744                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1745                         running += 1;
1746         }
1747
1748         if (force ||
1749             svcpt->scp_nreqs_active < running - 2)
1750                 return true;
1751
1752         if (svcpt->scp_nreqs_active >= running - 1)
1753                 return false;
1754
1755         return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
1756 }
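/*
 * Worked example (editorial, hypothetical numbers): with 8 running
 * threads, normal requests flow freely while scp_nreqs_active < 6.  At
 * exactly 6 active, a normal request is taken only if an HP request is
 * already being served (or the service has no HP queue at all); from 7
 * active onwards normal requests are refused, keeping the last thread
 * free for high-priority work.
 */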
1757
1758 static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
1759                                          bool force)
1760 {
1761         return ptlrpc_server_allow_normal(svcpt, force) &&
1762                ptlrpc_nrs_req_pending_nolock(svcpt, false);
1763 }
1764
1765 /**
1766  * Returns true if there are requests available in the incoming
1767  * request queue for processing and it is allowed to fetch them.
1768  * This can be called without any lock, but the caller must hold
1769  * ptlrpc_service_part::scp_req_lock to get a reliable result
1770  * \see ptlrpc_server_allow_normal
1771  * \see ptlrpc_server_allow_high
1772  */
1773 static inline bool
1774 ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
1775 {
1776         return ptlrpc_server_high_pending(svcpt, force) ||
1777                ptlrpc_server_normal_pending(svcpt, force);
1778 }
1779
1780 /**
1781  * Fetch a request for processing from the queue of unprocessed requests.
1782  * Favors high-priority requests.
1783  * Returns a pointer to the fetched request, or NULL if none is available.
1784  */
1785 static struct ptlrpc_request *
1786 ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
1787 {
1788         struct ptlrpc_request *req = NULL;
1789         ENTRY;
1790
1791         spin_lock(&svcpt->scp_req_lock);
1792 #ifndef __KERNEL__
1793         /* !@%$# liblustre only has 1 thread */
1794         if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
1795                 spin_unlock(&svcpt->scp_req_lock);
1796                 RETURN(NULL);
1797         }
1798 #endif
1799
1800         if (ptlrpc_server_high_pending(svcpt, force)) {
1801                 req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
1802                 if (req != NULL) {
1803                         svcpt->scp_hreq_count++;
1804                         goto got_request;
1805                 }
1806         }
1807
1808         if (ptlrpc_server_normal_pending(svcpt, force)) {
1809                 req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
1810                 if (req != NULL) {
1811                         svcpt->scp_hreq_count = 0;
1812                         goto got_request;
1813                 }
1814         }
1815
1816         spin_unlock(&svcpt->scp_req_lock);
1817         RETURN(NULL);
1818
1819 got_request:
1820         svcpt->scp_nreqs_active++;
1821         if (req->rq_hp)
1822                 svcpt->scp_nhreqs_active++;
1823
1824         spin_unlock(&svcpt->scp_req_lock);
1825
1826         if (likely(req->rq_export))
1827                 class_export_rpc_inc(req->rq_export);
1828
1829         RETURN(req);
1830 }
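/*
 * Consumer sketch (editorial; the real consumer is
 * ptlrpc_server_handle_request() below):
 *
 *         struct ptlrpc_request *req;
 *
 *         req = ptlrpc_server_request_get(svcpt, false);
 *         if (req == NULL)
 *                 return 0;
 *         ... handle the request ...
 *         ptlrpc_server_finish_active_request(svcpt, req);
 *
 * Note the accounting contract: a successful get bumps scp_nreqs_active
 * (and scp_nhreqs_active for HP requests) and takes an RPC reference on
 * the export, all of which the finish path must release.
 */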
1831
1832 /**
1833  * Handle freshly incoming reqs: add them to the timed early reply list and
1834  * pass them on to the regular request queue.
1835  * All incoming requests pass through here before getting into
1836  * ptlrpc_server_handle_request() later on.
1837  */
1838 static int
1839 ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
1840                             struct ptlrpc_thread *thread)
1841 {
1842         struct ptlrpc_service   *svc = svcpt->scp_service;
1843         struct ptlrpc_request   *req;
1844         __u32                   deadline;
1845         int                     rc;
1846         ENTRY;
1847
1848         spin_lock(&svcpt->scp_lock);
1849         if (cfs_list_empty(&svcpt->scp_req_incoming)) {
1850                 spin_unlock(&svcpt->scp_lock);
1851                 RETURN(0);
1852         }
1853
1854         req = cfs_list_entry(svcpt->scp_req_incoming.next,
1855                              struct ptlrpc_request, rq_list);
1856         cfs_list_del_init(&req->rq_list);
1857         svcpt->scp_nreqs_incoming--;
1858         /* Consider this still a "queued" request as far as stats are
1859          * concerned */
1860         spin_unlock(&svcpt->scp_lock);
1861
1862         /* go through security check/transform */
1863         rc = sptlrpc_svc_unwrap_request(req);
1864         switch (rc) {
1865         case SECSVC_OK:
1866                 break;
1867         case SECSVC_COMPLETE:
1868                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1869                 goto err_req;
1870         case SECSVC_DROP:
1871                 goto err_req;
1872         default:
1873                 LBUG();
1874         }
1875
1876         /*
1877          * for a null-flavored rpc the msg has already been unpacked by
1878          * sptlrpc; redoing it would not be harmful, just unnecessary.
1879          */
1880         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1881                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1882                 if (rc != 0) {
1883                         CERROR("error unpacking request: ptl %d from %s "
1884                                "x"LPU64"\n", svc->srv_req_portal,
1885                                libcfs_id2str(req->rq_peer), req->rq_xid);
1886                         goto err_req;
1887                 }
1888         }
1889
1890         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1891         if (rc) {
1892                 CERROR("error unpacking ptlrpc body: ptl %d from %s x"
1893                         LPU64"\n", svc->srv_req_portal,
1894                         libcfs_id2str(req->rq_peer), req->rq_xid);
1895                 goto err_req;
1896         }
1897
1898         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1899             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
1900                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1901                        cfs_fail_val, req->rq_xid);
1902                 goto err_req;
1903         }
1904
1905         rc = -EINVAL;
1906         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1907                 CERROR("wrong packet type received (type=%u) from %s\n",
1908                        lustre_msg_get_type(req->rq_reqmsg),
1909                        libcfs_id2str(req->rq_peer));
1910                 goto err_req;
1911         }
1912
1913         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1914         case MDS_WRITEPAGE:
1915         case OST_WRITE:
1916                 req->rq_bulk_write = 1;
1917                 break;
1918         case MDS_READPAGE:
1919         case OST_READ:
1920         case MGS_CONFIG_READ:
1921                 req->rq_bulk_read = 1;
1922                 break;
1923         }
1924
1925         CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
1926
1927         req->rq_export = class_conn2export(
1928                 lustre_msg_get_handle(req->rq_reqmsg));
1929         if (req->rq_export) {
1930                 rc = ptlrpc_check_req(req);
1931                 if (rc == 0) {
1932                         rc = sptlrpc_target_export_check(req->rq_export, req);
1933                         if (rc)
1934                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1935                                           "illegal security flavor,");
1936                 }
1937
1938                 if (rc)
1939                         goto err_req;
1940                 ptlrpc_update_export_timer(req->rq_export, 0);
1941         }
1942
1943         /* req_in handling should/must be fast */
1944         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1945                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1946                           cfs_time_sub(cfs_time_current_sec(),
1947                                        req->rq_arrival_time.tv_sec));
1948
1949         /* Set rpc server deadline and add it to the timed list */
1950         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1951                     MSGHDR_AT_SUPPORT) ?
1952                    /* The max time the client expects us to take */
1953                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1954         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1955         if (unlikely(deadline == 0)) {
1956                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1957                 goto err_req;
1958         }
1959
1960         req->rq_svc_thread = thread;
1961         if (thread != NULL) {
1962                 /* initialize the request session; it is needed for request
1963                  * processing by the target */
1964                 rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
1965                                                        LCT_NOREF);
1966                 if (rc) {
1967                         CERROR("%s: failure to initialize session: rc = %d\n",
1968                                thread->t_name, rc);
1969                         goto err_req;
1970                 }
1971                 req->rq_session.lc_thread = thread;
1972                 lu_context_enter(&req->rq_session);
1973                 req->rq_svc_thread->t_env->le_ses = &req->rq_session;
1974         }
1975
1976         ptlrpc_at_add_timed(req);
1977
1978         /* Move it over to the request processing queue */
1979         rc = ptlrpc_server_request_add(svcpt, req);
1980         if (rc)
1981                 GOTO(err_req, rc);
1982
1983         wake_up(&svcpt->scp_waitq);
1984         RETURN(1);
1985
1986 err_req:
1987         ptlrpc_server_finish_request(svcpt, req);
1988
1989         RETURN(1);
1990 }
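/*
 * Deadline arithmetic above, in numbers (editorial example): a client
 * with MSGHDR_AT_SUPPORT allowing 30s sends a request that arrives at
 * t=1000, so rq_deadline = 1030.  Clients without AT support get the
 * server-wide obd_timeout instead.  A timeout of 0 is rejected outright,
 * since such a request could never be answered in time.
 */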
1991
1992 /**
1993  * Main incoming request handling logic.
1994  * Calls handler function from service to do actual processing.
1995  */
1996 static int
1997 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
1998                              struct ptlrpc_thread *thread)
1999 {
2000         struct ptlrpc_service   *svc = svcpt->scp_service;
2001         struct ptlrpc_request   *request;
2002         struct timeval           work_start;
2003         struct timeval           work_end;
2004         long                     timediff;
2005         int                      fail_opc = 0;
2006
2007         ENTRY;
2008
2009         request = ptlrpc_server_request_get(svcpt, false);
2010         if (request == NULL)
2011                 RETURN(0);
2012
2013         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
2014                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
2015         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
2016                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
2017
2018         if (unlikely(fail_opc)) {
2019                 if (request->rq_export && request->rq_ops)
2020                         OBD_FAIL_TIMEOUT(fail_opc, 4);
2021         }
2022
2023         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
2024
2025         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
2026                 libcfs_debug_dumplog();
2027
2028         do_gettimeofday(&work_start);
2029         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
2030         if (likely(svc->srv_stats != NULL)) {
2031                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
2032                                     timediff);
2033                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
2034                                     svcpt->scp_nreqs_incoming);
2035                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
2036                                     svcpt->scp_nreqs_active);
2037                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
2038                                     at_get(&svcpt->scp_at_estimate));
2039         }
2040
2041         if (likely(request->rq_export)) {
2042                 if (unlikely(ptlrpc_check_req(request)))
2043                         goto put_conn;
2044                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
2045         }
2046
2047         /* Discard requests queued for longer than the deadline.
2048            The deadline is increased if we send an early reply. */
2049         if (cfs_time_current_sec() > request->rq_deadline) {
2050                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
2051                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
2052                           libcfs_id2str(request->rq_peer),
2053                           cfs_time_sub(request->rq_deadline,
2054                           request->rq_arrival_time.tv_sec),
2055                           cfs_time_sub(cfs_time_current_sec(),
2056                           request->rq_deadline));
2057                 goto put_conn;
2058         }
2059
2060         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
2061                "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
2062                (request->rq_export ?
2063                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2064                (request->rq_export ?
2065                 cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
2066                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
2067                libcfs_id2str(request->rq_peer),
2068                lustre_msg_get_opc(request->rq_reqmsg));
2069
2070         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
2071                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
2072
2073         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
2074
2075         /* re-assign request and session thread to the current one */
2076         request->rq_svc_thread = thread;
2077         if (thread != NULL) {
2078                 LASSERT(request->rq_session.lc_thread != NULL);
2079                 request->rq_session.lc_thread = thread;
2080                 request->rq_session.lc_cookie = 0x55;
2081                 thread->t_env->le_ses = &request->rq_session;
2082         }
2083         svc->srv_ops.so_req_handler(request);
2084
2085         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
2086
2087 put_conn:
2088         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
2089                 DEBUG_REQ(D_WARNING, request, "Request took longer "
2090                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
2091                           " client may timeout.",
2092                           cfs_time_sub(request->rq_deadline,
2093                                        request->rq_arrival_time.tv_sec),
2094                           cfs_time_sub(cfs_time_current_sec(),
2095                                        request->rq_deadline));
2096         }
2097
2098         do_gettimeofday(&work_end);
2099         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
2100         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
2101                "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
2102                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
2103                 current_comm(),
2104                 (request->rq_export ?
2105                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2106                 (request->rq_export ?
2107                  cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
2108                 lustre_msg_get_status(request->rq_reqmsg),
2109                 request->rq_xid,
2110                 libcfs_id2str(request->rq_peer),
2111                 lustre_msg_get_opc(request->rq_reqmsg),
2112                 timediff,
2113                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
2114                 (request->rq_repmsg ?
2115                  lustre_msg_get_transno(request->rq_repmsg) :
2116                  request->rq_transno),
2117                 request->rq_status,
2118                 (request->rq_repmsg ?
2119                  lustre_msg_get_status(request->rq_repmsg) : -999));
2120         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
2121                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
2122                 int opc = opcode_offset(op);
2123                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
2124                         LASSERT(opc < LUSTRE_MAX_OPCODES);
2125                         lprocfs_counter_add(svc->srv_stats,
2126                                             opc + EXTRA_MAX_OPCODES,
2127                                             timediff);
2128                 }
2129         }
2130         if (unlikely(request->rq_early_count)) {
2131                 DEBUG_REQ(D_ADAPTTO, request,
2132                           "sent %d early replies before finishing in "
2133                           CFS_DURATION_T"s",
2134                           request->rq_early_count,
2135                           cfs_time_sub(work_end.tv_sec,
2136                           request->rq_arrival_time.tv_sec));
2137         }
2138
2139         ptlrpc_server_finish_active_request(svcpt, request);
2140
2141         RETURN(1);
2142 }
2143
2144 /**
2145  * An internal function to process a single reply state object.
2146  */
2147 static int
2148 ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
2149 {
2150         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
2151         struct ptlrpc_service     *svc = svcpt->scp_service;
2152         struct obd_export         *exp;
2153         int                        nlocks;
2154         int                        been_handled;
2155         ENTRY;
2156
2157         exp = rs->rs_export;
2158
2159         LASSERT (rs->rs_difficult);
2160         LASSERT (rs->rs_scheduled);
2161         LASSERT (cfs_list_empty(&rs->rs_list));
2162
2163         spin_lock(&exp->exp_lock);
2164         /* Noop if removed already */
2165         cfs_list_del_init (&rs->rs_exp_list);
2166         spin_unlock(&exp->exp_lock);
2167
2168         /* The disk commit callback holds exp_uncommitted_replies_lock while it
2169          * iterates over newly committed replies, removing them from
2170          * exp_uncommitted_replies.  It then drops this lock and schedules the
2171          * replies it found for handling here.
2172          *
2173          * We can avoid contention for exp_uncommitted_replies_lock between the
2174          * HRT threads and further commit callbacks by checking rs_committed
2175          * which is set in the commit callback while it holds both
2176          * rs_lock and exp_uncommitted_replies_lock.
2177          *
2178          * If we see rs_committed clear, the commit callback _may_ not have
2179          * handled this reply yet and we race with it to grab
2180          * exp_uncommitted_replies_lock before removing the reply from
2181          * exp_uncommitted_replies.  Note that if we lose the race and the
2182          * reply has already been removed, list_del_init() is a noop.
2183          *
2184          * If we see rs_committed set, we know the commit callback is handling
2185          * or has handled this reply (store reordering might even allow us to
2186          * see rs_committed set out of sequence).  But since it is set while
2187          * holding rs_lock, we can be sure it has all completed once we hold
2188          * rs_lock, which we do right next.
2189          */
2190         if (!rs->rs_committed) {
2191                 spin_lock(&exp->exp_uncommitted_replies_lock);
2192                 cfs_list_del_init(&rs->rs_obd_list);
2193                 spin_unlock(&exp->exp_uncommitted_replies_lock);
2194         }
2195
2196         spin_lock(&rs->rs_lock);
2197
2198         been_handled = rs->rs_handled;
2199         rs->rs_handled = 1;
2200
2201         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
2202         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
2203
2204         if (nlocks == 0 && !been_handled) {
2205                 /* If we see this, we should already have seen the warning
2206                  * in mds_steal_ack_locks()  */
2207                 CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
2208                        " o%d NID %s\n",
2209                        rs,
2210                        rs->rs_xid, rs->rs_transno, rs->rs_opc,
2211                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
2212         }
2213
2214         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
2215                 spin_unlock(&rs->rs_lock);
2216
2217                 if (!been_handled && rs->rs_on_net) {
2218                         LNetMDUnlink(rs->rs_md_h);
2219                         /* Ignore return code; we're racing with completion */
2220                 }
2221
2222                 while (nlocks-- > 0)
2223                         ldlm_lock_decref(&rs->rs_locks[nlocks],
2224                                          rs->rs_modes[nlocks]);
2225
2226                 spin_lock(&rs->rs_lock);
2227         }
2228
2229         rs->rs_scheduled = 0;
2230
2231         if (!rs->rs_on_net) {
2232                 /* Off the net */
2233                 spin_unlock(&rs->rs_lock);
2234
2235                 class_export_put (exp);
2236                 rs->rs_export = NULL;
2237                 ptlrpc_rs_decref (rs);
2238                 if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
2239                     svc->srv_is_stopping)
2240                         wake_up_all(&svcpt->scp_waitq);
2241                 RETURN(1);
2242         }
2243
2244         /* still on the net; callback will schedule */
2245         spin_unlock(&rs->rs_lock);
2246         RETURN(1);
2247 }
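/*
 * For reference, the committing side described above lives in
 * ptlrpc_commit_replies(); roughly (editorial sketch, not verbatim):
 *
 *         spin_lock(&exp->exp_uncommitted_replies_lock);
 *         cfs_list_for_each_entry_safe(rs, nxt,
 *                                      &exp->exp_uncommitted_replies,
 *                                      rs_obd_list) {
 *                 if (rs->rs_transno <= exp->exp_last_committed) {
 *                         cfs_list_del_init(&rs->rs_obd_list);
 *                         ... set rs_committed under rs_lock and hand
 *                             the reply to an HR thread ...
 *                 }
 *         }
 *         spin_unlock(&exp->exp_uncommitted_replies_lock);
 *
 * Setting rs_committed under rs_lock is what lets ptlrpc_handle_rs()
 * skip exp_uncommitted_replies_lock in the common case.
 */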
2248
2249 #ifndef __KERNEL__
2250
2251 /**
2252  * Check whether given service has a reply available for processing
2253  * and process it.
2254  *
2255  * \param svc a ptlrpc service
2256  * \retval 0 no replies processed
2257  * \retval 1 one reply processed
2258  */
2259 static int
2260 ptlrpc_server_handle_reply(struct ptlrpc_service_part *svcpt)
2261 {
2262         struct ptlrpc_reply_state *rs = NULL;
2263         ENTRY;
2264
2265         spin_lock(&svcpt->scp_rep_lock);
2266         if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
2267                 rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
2268                                     struct ptlrpc_reply_state,
2269                                     rs_list);
2270                 cfs_list_del_init(&rs->rs_list);
2271         }
2272         spin_unlock(&svcpt->scp_rep_lock);
2273         if (rs != NULL)
2274                 ptlrpc_handle_rs(rs);
2275         RETURN(rs != NULL);
2276 }
2277
2278 /* FIXME make use of timeout later */
2279 int
2280 liblustre_check_services (void *arg)
2281 {
2282         int  did_something = 0;
2283         int  rc;
2284         cfs_list_t *tmp, *nxt;
2285         ENTRY;
2286
2287         /* I'm relying on being single threaded so as not to have to lock
2288          * ptlrpc_all_services etc */
2289         cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
2290                 struct ptlrpc_service *svc =
2291                         cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
2292                 struct ptlrpc_service_part *svcpt;
2293
2294                 LASSERT(svc->srv_ncpts == 1);
2295                 svcpt = svc->srv_parts[0];
2296
2297                 if (svcpt->scp_nthrs_running != 0)     /* I've recursed */
2298                         continue;
2299
2300                 /* service threads can block for bulk, so this limits us
2301                  * (arbitrarily) to recursing 1 stack frame per service.
2302                  * Note that the problem with recursion is that we have to
2303                  * unwind completely before our caller can resume. */
2304
2305                 svcpt->scp_nthrs_running++;
2306
2307                 do {
2308                         rc = ptlrpc_server_handle_req_in(svcpt, NULL);
2309                         rc |= ptlrpc_server_handle_reply(svcpt);
2310                         rc |= ptlrpc_at_check_timed(svcpt);
2311                         rc |= ptlrpc_server_handle_request(svcpt, NULL);
2312                         rc |= (ptlrpc_server_post_idle_rqbds(svcpt) > 0);
2313                         did_something |= rc;
2314                 } while (rc);
2315
2316                 svcpt->scp_nthrs_running--;
2317         }
2318
2319         RETURN(did_something);
2320 }
2321 #define ptlrpc_stop_all_threads(s) do {} while (0)
2322
2323 #else /* __KERNEL__ */
2324
2325 static void
2326 ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
2327 {
2328         int avail = svcpt->scp_nrqbds_posted;
2329         int low_water = test_req_buffer_pressure ? 0 :
2330                         svcpt->scp_service->srv_nbuf_per_group / 2;
2331
2332         /* NB I'm not locking; just looking. */
2333
2334         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
2335          * allowed the request history to grow out of control.  We could put a
2336          * sanity check on that here and cull some history if we need the
2337          * space. */
2338
2339         if (avail <= low_water)
2340                 ptlrpc_grow_req_bufs(svcpt, 1);
2341
2342         if (svcpt->scp_service->srv_stats) {
2343                 lprocfs_counter_add(svcpt->scp_service->srv_stats,
2344                                     PTLRPC_REQBUF_AVAIL_CNTR, avail);
2345         }
2346 }
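/*
 * Example (editorial, hypothetical numbers): with srv_nbuf_per_group = 64
 * the low-water mark is 32 posted buffers, so dropping to 32 or fewer
 * triggers ptlrpc_grow_req_bufs().  Setting test_req_buffer_pressure
 * forces the low-water mark to 0, so the pool only grows once it is
 * completely exhausted.
 */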
2347
2348 static int
2349 ptlrpc_retry_rqbds(void *arg)
2350 {
2351         struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
2352
2353         svcpt->scp_rqbd_timeout = 0;
2354         return -ETIMEDOUT;
2355 }
2356
2357 static inline int
2358 ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
2359 {
2360         return svcpt->scp_nreqs_active <
2361                svcpt->scp_nthrs_running - 1 -
2362                (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
2363 }
2364
2365 /**
2366  * Check whether we are allowed to create more threads.
2367  * This can be called without any lock, but the caller must hold
2368  * ptlrpc_service_part::scp_lock to get a reliable result
2369  */
2370 static inline int
2371 ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
2372 {
2373         return svcpt->scp_nthrs_running +
2374                svcpt->scp_nthrs_starting <
2375                svcpt->scp_service->srv_nthrs_cpt_limit;
2376 }
2377
2378 /**
2379  * too many requests and allowed to create more threads
2380  */
2381 static inline int
2382 ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
2383 {
2384         return !ptlrpc_threads_enough(svcpt) &&
2385                 ptlrpc_threads_increasable(svcpt);
2386 }
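/*
 * Worked example (editorial, hypothetical numbers): a partition with 8
 * running threads and an HP handler "has enough" threads while
 * scp_nreqs_active < 6, i.e. two threads are held in reserve.  Once
 * activity reaches that bound, a new thread is spawned, but only while
 * running + starting is still below srv_nthrs_cpt_limit.
 */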
2387
2388 static inline int
2389 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2390 {
2391         return thread_is_stopping(thread) ||
2392                thread->t_svcpt->scp_service->srv_is_stopping;
2393 }
2394
2395 static inline int
2396 ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
2397 {
2398         return !cfs_list_empty(&svcpt->scp_rqbd_idle) &&
2399                svcpt->scp_rqbd_timeout == 0;
2400 }
2401
2402 static inline int
2403 ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
2404 {
2405         return svcpt->scp_at_check;
2406 }
2407
2408 /**
2409  * Check whether requests are waiting for preprocessing.
2410  * This can be called without any lock, but the caller must hold
2411  * ptlrpc_service_part::scp_lock to get a reliable result
2412  */
2413 static inline int
2414 ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
2415 {
2416         return !cfs_list_empty(&svcpt->scp_req_incoming);
2417 }
2418
2419 static __attribute__((__noinline__)) int
2420 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
2421                   struct ptlrpc_thread *thread)
2422 {
2423         /* Don't exit while there are replies to be handled */
2424         struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
2425                                              ptlrpc_retry_rqbds, svcpt);
2426
2427         lc_watchdog_disable(thread->t_watchdog);
2428
2429         cond_resched();
2430
2431         l_wait_event_exclusive_head(svcpt->scp_waitq,
2432                                 ptlrpc_thread_stopping(thread) ||
2433                                 ptlrpc_server_request_incoming(svcpt) ||
2434                                 ptlrpc_server_request_pending(svcpt, false) ||
2435                                 ptlrpc_rqbd_pending(svcpt) ||
2436                                 ptlrpc_at_check(svcpt), &lwi);
2437
2438         if (ptlrpc_thread_stopping(thread))
2439                 return -EINTR;
2440
2441         lc_watchdog_touch(thread->t_watchdog,
2442                           ptlrpc_server_get_timeout(svcpt));
2443         return 0;
2444 }
2445
2446 /**
2447  * Main thread body for service threads.
2448  * Waits in a loop for new requests to process to appear.
2449  * Every time an incoming request is added to the queue, the waitq
2450  * is woken up and one of the threads will handle it.
2451  */
2452 static int ptlrpc_main(void *arg)
2453 {
2454         struct ptlrpc_thread            *thread = (struct ptlrpc_thread *)arg;
2455         struct ptlrpc_service_part      *svcpt = thread->t_svcpt;
2456         struct ptlrpc_service           *svc = svcpt->scp_service;
2457         struct ptlrpc_reply_state       *rs;
2458 #ifdef WITH_GROUP_INFO
2459         struct group_info *ginfo = NULL;
2460 #endif
2461         struct lu_env *env;
2462         int counter = 0, rc = 0;
2463         ENTRY;
2464
2465         thread->t_pid = current_pid();
2466         unshare_fs_struct();
2467
2468         /* NB: we will call cfs_cpt_bind() for all threads, because we
2469          * might want to run the lustre server only on a subset of system
2470          * CPUs; in that case ->scp_cpt is CFS_CPT_ANY */
2471         rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
2472         if (rc != 0) {
2473                 CWARN("%s: failed to bind %s on CPT %d\n",
2474                       svc->srv_name, thread->t_name, svcpt->scp_cpt);
2475         }
2476
2477 #ifdef WITH_GROUP_INFO
2478         ginfo = groups_alloc(0);
2479         if (!ginfo) {
2480                 rc = -ENOMEM;
2481                 goto out;
2482         }
2483
2484         set_current_groups(ginfo);
2485         put_group_info(ginfo);
2486 #endif
2487
2488         if (svc->srv_ops.so_thr_init != NULL) {
2489                 rc = svc->srv_ops.so_thr_init(thread);
2490                 if (rc)
2491                         goto out;
2492         }
2493
2494         OBD_ALLOC_PTR(env);
2495         if (env == NULL) {
2496                 rc = -ENOMEM;
2497                 goto out_srv_fini;
2498         }
2499
2500         rc = lu_context_init(&env->le_ctx,
2501                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2502         if (rc)
2503                 goto out_srv_fini;
2504
2505         thread->t_env = env;
2506         env->le_ctx.lc_thread = thread;
2507         env->le_ctx.lc_cookie = 0x6;
2508
2509         while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
2510                 rc = ptlrpc_server_post_idle_rqbds(svcpt);
2511                 if (rc >= 0)
2512                         continue;
2513
2514                 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2515                         svc->srv_name, svcpt->scp_cpt, rc);
2516                 goto out_srv_fini;
2517         }
2518
2519         /* Alloc reply state structure for this one */
2520         OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
2521         if (!rs) {
2522                 rc = -ENOMEM;
2523                 goto out_srv_fini;
2524         }
2525
2526         spin_lock(&svcpt->scp_lock);
2527
2528         LASSERT(thread_is_starting(thread));
2529         thread_clear_flags(thread, SVC_STARTING);
2530
2531         LASSERT(svcpt->scp_nthrs_starting == 1);
2532         svcpt->scp_nthrs_starting--;
2533
2534         /* SVC_STOPPING may already be set here if someone else is trying
2535          * to stop the service while this new thread has been dynamically
2536          * forked. We still set SVC_RUNNING to let our creator know that
2537          * we are now running; however, we will exit as soon as possible */
2538         thread_add_flags(thread, SVC_RUNNING);
2539         svcpt->scp_nthrs_running++;
2540         spin_unlock(&svcpt->scp_lock);
2541
2542         /* wake up our creator in case it is still waiting. */
2543         wake_up(&thread->t_ctl_waitq);
2544
2545         thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2546                                              NULL, NULL);
2547
2548         spin_lock(&svcpt->scp_rep_lock);
2549         cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
2550         wake_up(&svcpt->scp_rep_waitq);
2551         spin_unlock(&svcpt->scp_rep_lock);
2552
2553         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2554                svcpt->scp_nthrs_running);
2555
2556         /* XXX maintain a list of all managed devices: insert here */
2557         while (!ptlrpc_thread_stopping(thread)) {
2558                 if (ptlrpc_wait_event(svcpt, thread))
2559                         break;
2560
2561                 ptlrpc_check_rqbd_pool(svcpt);
2562
2563                 if (ptlrpc_threads_need_create(svcpt)) {
2564                         /* Ignore return code - we tried... */
2565                         ptlrpc_start_thread(svcpt, 0);
2566                 }
2567
2568                 /* Process all incoming reqs before handling any */
2569                 if (ptlrpc_server_request_incoming(svcpt)) {
2570                         lu_context_enter(&env->le_ctx);
2571                         env->le_ses = NULL;
2572                         ptlrpc_server_handle_req_in(svcpt, thread);
2573                         lu_context_exit(&env->le_ctx);
2574
2575                         /* but limit ourselves in case of flood */
2576                         if (counter++ < 100)
2577                                 continue;
2578                         counter = 0;
2579                 }
2580
2581                 if (ptlrpc_at_check(svcpt))
2582                         ptlrpc_at_check_timed(svcpt);
2583
2584                 if (ptlrpc_server_request_pending(svcpt, false)) {
2585                         lu_context_enter(&env->le_ctx);
2586                         ptlrpc_server_handle_request(svcpt, thread);
2587                         lu_context_exit(&env->le_ctx);
2588                 }
2589
2590                 if (ptlrpc_rqbd_pending(svcpt) &&
2591                     ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
2592                         /* I just failed to repost request buffers.
2593                          * Wait for a timeout (unless something else
2594                          * happens) before I try again */
2595                         svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
2596                         CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
2597                                svcpt->scp_nrqbds_posted);
2598                 }
2599         }
2600
2601         lc_watchdog_delete(thread->t_watchdog);
2602         thread->t_watchdog = NULL;
2603
2604 out_srv_fini:
2605         /*
2606          * deconstruct service specific state created by ptlrpc_start_thread()
2607          */
2608         if (svc->srv_ops.so_thr_done != NULL)
2609                 svc->srv_ops.so_thr_done(thread);
2610
2611         if (env != NULL) {
2612                 lu_context_fini(&env->le_ctx);
2613                 OBD_FREE_PTR(env);
2614         }
2615 out:
2616         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2617                thread, thread->t_pid, thread->t_id, rc);
2618
2619         spin_lock(&svcpt->scp_lock);
2620         if (thread_test_and_clear_flags(thread, SVC_STARTING))
2621                 svcpt->scp_nthrs_starting--;
2622
2623         if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
2624                 /* must know immediately */
2625                 svcpt->scp_nthrs_running--;
2626         }
2627
2628         thread->t_id = rc;
2629         thread_add_flags(thread, SVC_STOPPED);
2630
2631         wake_up(&thread->t_ctl_waitq);
2632         spin_unlock(&svcpt->scp_lock);
2633
2634         return rc;
2635 }
2636
2637 static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
2638                           cfs_list_t *replies)
2639 {
2640         int result;
2641
2642         spin_lock(&hrt->hrt_lock);
2643
2644         cfs_list_splice_init(&hrt->hrt_queue, replies);
2645         result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
2646
2647         spin_unlock(&hrt->hrt_lock);
2648         return result;
2649 }
2650
2651 /**
2652  * Main body of "handle reply" function.
2653  * It processes acked reply states
2654  */
2655 static int ptlrpc_hr_main(void *arg)
2656 {
2657         struct ptlrpc_hr_thread         *hrt = (struct ptlrpc_hr_thread *)arg;
2658         struct ptlrpc_hr_partition      *hrp = hrt->hrt_partition;
2659         CFS_LIST_HEAD                   (replies);
2660         char                            threadname[20];
2661         int                             rc;
2662
2663         snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
2664                  hrp->hrp_cpt, hrt->hrt_id);
2665         unshare_fs_struct();
2666
2667         rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
2668         if (rc != 0) {
2669                 CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
2670                       threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
2671         }
2672
2673         cfs_atomic_inc(&hrp->hrp_nstarted);
2674         wake_up(&ptlrpc_hr.hr_waitq);
2675
2676         while (!ptlrpc_hr.hr_stopping) {
2677                 l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
2678
2679                 while (!cfs_list_empty(&replies)) {
2680                         struct ptlrpc_reply_state *rs;
2681
2682                         rs = cfs_list_entry(replies.prev,
2683                                             struct ptlrpc_reply_state,
2684                                             rs_list);
2685                         cfs_list_del_init(&rs->rs_list);
2686                         ptlrpc_handle_rs(rs);
2687                 }
2688         }
2689
2690         cfs_atomic_inc(&hrp->hrp_nstopped);
2691         wake_up(&ptlrpc_hr.hr_waitq);
2692
2693         return 0;
2694 }
2695
2696 static void ptlrpc_stop_hr_threads(void)
2697 {
2698         struct ptlrpc_hr_partition      *hrp;
2699         int                             i;
2700         int                             j;
2701
2702         ptlrpc_hr.hr_stopping = 1;
2703
2704         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2705                 if (hrp->hrp_thrs == NULL)
2706                         continue; /* uninitialized */
2707                 for (j = 0; j < hrp->hrp_nthrs; j++)
2708                         wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
2709         }
2710
2711         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2712                 if (hrp->hrp_thrs == NULL)
2713                         continue; /* uninitialized */
2714                 wait_event(ptlrpc_hr.hr_waitq,
2715                                cfs_atomic_read(&hrp->hrp_nstopped) ==
2716                                cfs_atomic_read(&hrp->hrp_nstarted));
2717         }
2718 }
2719
2720 static int ptlrpc_start_hr_threads(void)
2721 {
2722         struct ptlrpc_hr_partition      *hrp;
2723         int                             i;
2724         int                             j;
2725         ENTRY;
2726
2727         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2728                 int     rc = 0;
2729
2730                 for (j = 0; j < hrp->hrp_nthrs; j++) {
2731                         struct  ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
2732                         rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
2733                                                  &hrp->hrp_thrs[j],
2734                                                  "ptlrpc_hr%02d_%03d",
2735                                                  hrp->hrp_cpt,
2736                                                  hrt->hrt_id));
2737                         if (IS_ERR_VALUE(rc))
2738                                 break;
2739                 }
2740                 wait_event(ptlrpc_hr.hr_waitq,
2741                                cfs_atomic_read(&hrp->hrp_nstarted) == j);
2742                 if (!IS_ERR_VALUE(rc))
2743                         continue;
2744
2745                 CERROR("Reply handling thread %d:%d failed to start: "
2746                        "rc = %d\n", i, j, rc);
2747                 ptlrpc_stop_hr_threads();
2748                 RETURN(rc);
2749         }
2750         RETURN(0);
2751 }
2752
2753 static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
2754 {
2755         struct l_wait_info      lwi = { 0 };
2756         struct ptlrpc_thread    *thread;
2757         CFS_LIST_HEAD           (zombie);
2758
2759         ENTRY;
2760
2761         CDEBUG(D_INFO, "Stopping threads for service %s\n",
2762                svcpt->scp_service->srv_name);
2763
2764         spin_lock(&svcpt->scp_lock);
2765         /* let each thread know that we would like it to stop asap */
2766         list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
2767                 CDEBUG(D_INFO, "Stopping thread %s #%u\n",
2768                        svcpt->scp_service->srv_thread_name, thread->t_id);
2769                 thread_add_flags(thread, SVC_STOPPING);
2770         }
2771
2772         wake_up_all(&svcpt->scp_waitq);
2773
2774         while (!cfs_list_empty(&svcpt->scp_threads)) {
2775                 thread = cfs_list_entry(svcpt->scp_threads.next,
2776                                         struct ptlrpc_thread, t_link);
2777                 if (thread_is_stopped(thread)) {
2778                         cfs_list_del(&thread->t_link);
2779                         cfs_list_add(&thread->t_link, &zombie);
2780                         continue;
2781                 }
2782                 spin_unlock(&svcpt->scp_lock);
2783
2784                 CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
2785                        svcpt->scp_service->srv_thread_name, thread->t_id);
2786                 l_wait_event(thread->t_ctl_waitq,
2787                              thread_is_stopped(thread), &lwi);
2788
2789                 spin_lock(&svcpt->scp_lock);
2790         }
2791
2792         spin_unlock(&svcpt->scp_lock);
2793
2794         while (!cfs_list_empty(&zombie)) {
2795                 thread = cfs_list_entry(zombie.next,
2796                                         struct ptlrpc_thread, t_link);
2797                 cfs_list_del(&thread->t_link);
2798                 OBD_FREE_PTR(thread);
2799         }
2800         EXIT;
2801 }

/**
 * Stops all threads of a particular service \a svc
 */
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part *svcpt;
        int                        i;
        ENTRY;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service != NULL)
                        ptlrpc_svcpt_stop_threads(svcpt);
        }

        EXIT;
}
EXPORT_SYMBOL(ptlrpc_stop_all_threads);

int ptlrpc_start_threads(struct ptlrpc_service *svc)
{
        int     rc = 0;
        int     i;
        int     j;
        ENTRY;

        /* We require 2 threads min, see note in ptlrpc_server_handle_request */
        LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);

        for (i = 0; i < svc->srv_ncpts; i++) {
                for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
                        rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
                        if (rc == 0)
                                continue;

                        if (rc != -EMFILE)
                                goto failed;
                        /* We have enough threads, don't start more. b=15759 */
                        break;
                }
        }

        RETURN(0);
 failed:
        CERROR("cannot start %s thread #%d_%d: rc %d\n",
               svc->srv_thread_name, i, j, rc);
        ptlrpc_stop_all_threads(svc);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_start_threads);

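/**
 * Start one new thread on service partition \a svcpt.
 *
 * If \a wait is nonzero, retry on a concurrent-start race and block until
 * the new thread is running (or has stopped); otherwise return -EAGAIN on
 * the race and 0 as soon as the kthread has been forked.
 */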
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
        struct l_wait_info      lwi = { 0 };
        struct ptlrpc_thread    *thread;
        struct ptlrpc_service   *svc;
        int                     rc;
        ENTRY;

        LASSERT(svcpt != NULL);

        svc = svcpt->scp_service;

        CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
               svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
               svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);

 again:
        if (unlikely(svc->srv_is_stopping))
                RETURN(-ESRCH);

        if (!ptlrpc_threads_increasable(svcpt) ||
            (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
             svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
                RETURN(-EMFILE);

        OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        spin_lock(&svcpt->scp_lock);
        if (!ptlrpc_threads_increasable(svcpt)) {
                spin_unlock(&svcpt->scp_lock);
                OBD_FREE_PTR(thread);
                RETURN(-EMFILE);
        }

        if (svcpt->scp_nthrs_starting != 0) {
                /* serialize starting because some modules (obdfilter)
                 * might require unique and contiguous t_ids */
                LASSERT(svcpt->scp_nthrs_starting == 1);
                spin_unlock(&svcpt->scp_lock);
                OBD_FREE_PTR(thread);
                if (wait) {
                        CDEBUG(D_INFO, "Waiting for thread %s #%d to be created\n",
                               svc->srv_thread_name, svcpt->scp_thr_nextid);
                        schedule();
                        goto again;
                }

                CDEBUG(D_INFO, "Thread %s #%d creation race, retry later\n",
                       svc->srv_thread_name, svcpt->scp_thr_nextid);
                RETURN(-EAGAIN);
        }

        svcpt->scp_nthrs_starting++;
        thread->t_id = svcpt->scp_thr_nextid++;
        thread_add_flags(thread, SVC_STARTING);
        thread->t_svcpt = svcpt;

        cfs_list_add(&thread->t_link, &svcpt->scp_threads);
        spin_unlock(&svcpt->scp_lock);

        if (svcpt->scp_cpt >= 0) {
                snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
                         svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
        } else {
                snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
                         svc->srv_thread_name, thread->t_id);
        }

        CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
        rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
        if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread '%s': rc %d\n",
                       thread->t_name, rc);
                spin_lock(&svcpt->scp_lock);
                --svcpt->scp_nthrs_starting;
                if (thread_is_stopping(thread)) {
                        /* this ptlrpc_thread is being handled
                         * by ptlrpc_svcpt_stop_threads now
                         */
                        thread_add_flags(thread, SVC_STOPPED);
                        wake_up(&thread->t_ctl_waitq);
                        spin_unlock(&svcpt->scp_lock);
                } else {
                        cfs_list_del(&thread->t_link);
                        spin_unlock(&svcpt->scp_lock);
                        OBD_FREE_PTR(thread);
                }
                RETURN(rc);
        }

        if (!wait)
                RETURN(0);

        l_wait_event(thread->t_ctl_waitq,
                     thread_is_running(thread) || thread_is_stopped(thread),
                     &lwi);

        rc = thread_is_stopped(thread) ? thread->t_id : 0;
        RETURN(rc);
}

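/**
 * Initialize the reply handling service: allocate the per-CPT partition
 * array and the thread descriptors, then start the threads.
 *
 * The thread count per partition is the CPT weight scaled down by the
 * number of hyperthread siblings, so each core (rather than each logical
 * CPU) gets roughly one reply handling thread.
 */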
int ptlrpc_hr_init(void)
{
        struct ptlrpc_hr_partition      *hrp;
        struct ptlrpc_hr_thread         *hrt;
        int                             rc;
        int                             i;
        int                             j;
        ENTRY;

        memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
        ptlrpc_hr.hr_cpt_table = cfs_cpt_table;

        ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
                                                   sizeof(*hrp));
        if (ptlrpc_hr.hr_partitions == NULL)
                RETURN(-ENOMEM);

        init_waitqueue_head(&ptlrpc_hr.hr_waitq);

        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;

                cfs_atomic_set(&hrp->hrp_nstarted, 0);
                cfs_atomic_set(&hrp->hrp_nstopped, 0);

                hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
                hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0);

                LASSERT(hrp->hrp_nthrs > 0);
                OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
                              hrp->hrp_nthrs * sizeof(*hrt));
                if (hrp->hrp_thrs == NULL)
                        GOTO(out, rc = -ENOMEM);

                for (j = 0; j < hrp->hrp_nthrs; j++) {
                        hrt = &hrp->hrp_thrs[j];

                        hrt->hrt_id = j;
                        hrt->hrt_partition = hrp;
                        init_waitqueue_head(&hrt->hrt_waitq);
                        spin_lock_init(&hrt->hrt_lock);
                        CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
                }
        }

        rc = ptlrpc_start_hr_threads();
out:
        if (rc != 0)
                ptlrpc_hr_fini();
        RETURN(rc);
}

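/**
 * Undo ptlrpc_hr_init(): stop all reply handling threads and release the
 * per-CPT partition and thread arrays.  Safe to call on a partially
 * initialized \e ptlrpc_hr (it is also used on the ptlrpc_hr_init()
 * error path).
 */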
void ptlrpc_hr_fini(void)
{
        struct ptlrpc_hr_partition      *hrp;
        int                             i;

        if (ptlrpc_hr.hr_partitions == NULL)
                return;

        ptlrpc_stop_hr_threads();

        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                if (hrp->hrp_thrs != NULL) {
                        OBD_FREE(hrp->hrp_thrs,
                                 hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
                }
        }

        cfs_percpt_free(ptlrpc_hr.hr_partitions);
        ptlrpc_hr.hr_partitions = NULL;
}

#endif /* __KERNEL__ */

/**
 * Wait until all already scheduled replies are processed.
 */
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
        while (1) {
                int rc;
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
                                                     NULL, NULL);

                rc = l_wait_event(svcpt->scp_waitq,
                     cfs_atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
                if (rc == 0)
                        break;
                CWARN("Unexpectedly long timeout waiting for replies: %s %p\n",
                      svcpt->scp_service->srv_name, svcpt->scp_service);
        }
}

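/**
 * Disarm the adaptive-timeout (AT) timer on every partition of \a svc so
 * that no further early replies are scheduled while the service is being
 * torn down.
 */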
static void
ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        int                             i;

        /* early disarm AT timer... */
        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service != NULL)
                        cfs_timer_disarm(&svcpt->scp_at_timer);
        }
}

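/**
 * Unlink all posted request buffers of \a svc from the network and wait
 * until LNet has dropped its references, i.e. until \e scp_nrqbds_posted
 * drains to zero on every partition.
 */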
static void
ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part        *svcpt;
        struct ptlrpc_request_buffer_desc *rqbd;
        struct l_wait_info                lwi;
        int                               rc;
        int                               i;

        /* All history will be culled when the next request buffer is
         * freed in ptlrpc_service_purge_all() */
        svc->srv_hist_nrqbds_cpt_max = 0;

        rc = LNetClearLazyPortal(svc->srv_req_portal);
        LASSERT(rc == 0);

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                cfs_list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
                                        rqbd_list) {
                        rc = LNetMDUnlink(rqbd->rqbd_md_h);
                        LASSERT(rc == 0 || rc == -ENOENT);
                }
        }

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* Wait for the network to release any buffers
                 * it's currently filling */
                spin_lock(&svcpt->scp_lock);
                while (svcpt->scp_nrqbds_posted != 0) {
                        spin_unlock(&svcpt->scp_lock);
                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility
                         * of sluggish NALs */
                        lwi = LWI_TIMEOUT_INTERVAL(
                                        cfs_time_seconds(LONG_UNLINK),
                                        cfs_time_seconds(1), NULL, NULL);
                        rc = l_wait_event(svcpt->scp_waitq,
                                          svcpt->scp_nrqbds_posted == 0, &lwi);
                        if (rc == -ETIMEDOUT) {
                                CWARN("Service %s waiting for "
                                      "request buffers\n",
                                      svcpt->scp_service->srv_name);
                        }
                        spin_lock(&svcpt->scp_lock);
                }
                spin_unlock(&svcpt->scp_lock);
        }
}

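/**
 * Drop everything still queued on \a svc: schedule any active difficult
 * replies, discard incoming and pending requests, then free the idle
 * request buffers and reply states.  Only safe once all service threads
 * have stopped and all rqbds have been unlinked.
 */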
static void
ptlrpc_service_purge_all(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part              *svcpt;
        struct ptlrpc_request_buffer_desc       *rqbd;
        struct ptlrpc_request                   *req;
        struct ptlrpc_reply_state               *rs;
        int                                     i;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                spin_lock(&svcpt->scp_rep_lock);
                while (!cfs_list_empty(&svcpt->scp_rep_active)) {
                        rs = cfs_list_entry(svcpt->scp_rep_active.next,
                                            struct ptlrpc_reply_state, rs_list);
                        spin_lock(&rs->rs_lock);
                        ptlrpc_schedule_difficult_reply(rs);
                        spin_unlock(&rs->rs_lock);
                }
                spin_unlock(&svcpt->scp_rep_lock);

                /* purge the request queue.  NB No new replies (rqbds
                 * all unlinked) and no service threads, so I'm the only
                 * thread noodling the request queue now */
                while (!cfs_list_empty(&svcpt->scp_req_incoming)) {
                        req = cfs_list_entry(svcpt->scp_req_incoming.next,
                                             struct ptlrpc_request, rq_list);

                        cfs_list_del(&req->rq_list);
                        svcpt->scp_nreqs_incoming--;
                        ptlrpc_server_finish_request(svcpt, req);
                }

                while (ptlrpc_server_request_pending(svcpt, true)) {
                        req = ptlrpc_server_request_get(svcpt, true);
                        ptlrpc_server_finish_active_request(svcpt, req);
                }

                LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted));
                LASSERT(svcpt->scp_nreqs_incoming == 0);
                LASSERT(svcpt->scp_nreqs_active == 0);
                /* history should have been culled by
                 * ptlrpc_server_finish_request */
                LASSERT(svcpt->scp_hist_nrqbds == 0);

                /* Now free all the request buffers since nothing
                 * references them any more... */

                while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
                        rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
                                              struct ptlrpc_request_buffer_desc,
                                              rqbd_list);
                        ptlrpc_free_rqbd(rqbd);
                }
                ptlrpc_wait_replies(svcpt);

                while (!cfs_list_empty(&svcpt->scp_rep_idle)) {
                        rs = cfs_list_entry(svcpt->scp_rep_idle.next,
                                            struct ptlrpc_reply_state,
                                            rs_list);
                        cfs_list_del(&rs->rs_list);
                        OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
                }
        }
}

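/**
 * Final teardown of \a svc: release the adaptive-timeout arrays, the
 * per-partition structures, the CPT list and the service struct itself.
 */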
static void
ptlrpc_service_free(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        struct ptlrpc_at_array          *array;
        int                             i;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* In case somebody rearmed this in the meantime */
                cfs_timer_disarm(&svcpt->scp_at_timer);
                array = &svcpt->scp_at_array;

                if (array->paa_reqs_array != NULL) {
                        OBD_FREE(array->paa_reqs_array,
                                 sizeof(cfs_list_t) * array->paa_size);
                        array->paa_reqs_array = NULL;
                }

                if (array->paa_reqs_count != NULL) {
                        OBD_FREE(array->paa_reqs_count,
                                 sizeof(__u32) * array->paa_size);
                        array->paa_reqs_count = NULL;
                }
        }

        ptlrpc_service_for_each_part(svcpt, i, svc)
                OBD_FREE_PTR(svcpt);

        if (svc->srv_cpts != NULL)
                cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);

        OBD_FREE(svc, offsetof(struct ptlrpc_service,
                               srv_parts[svc->srv_ncpts]));
}

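/**
 * Unregister and destroy service \a service: unlist it, stop its threads,
 * unlink and purge its request buffers, clean up NRS and lprocfs state,
 * and finally free it.  The helpers below tear the service down in
 * dependency order.
 */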
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        ENTRY;

        CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);

        service->srv_is_stopping = 1;

        mutex_lock(&ptlrpc_all_services_mutex);
        cfs_list_del_init(&service->srv_list);
        mutex_unlock(&ptlrpc_all_services_mutex);

        ptlrpc_service_del_atimer(service);
        ptlrpc_stop_all_threads(service);

        ptlrpc_service_unlink_rqbd(service);
        ptlrpc_service_purge_all(service);
        ptlrpc_service_nrs_cleanup(service);

        ptlrpc_lprocfs_unregister_service(service);

        ptlrpc_service_free(service);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_unregister_service);

/**
 * Returns 0 if the service partition is healthy.
 *
 * Right now, it just checks to make sure that requests aren't languishing
 * in the queue.  We'll use this health check to govern whether a node needs
 * to be shot, so it's intentionally non-aggressive.
 */
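/*
 * Illustrative numbers (assuming the stock defaults): timediff below is in
 * microseconds, so with adaptive timeouts off and obd_timeout = 100s the
 * oldest queued request may wait up to 150s before the check fails; with
 * AT enabled the limit is at_max seconds instead.
 */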
int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_request           *request = NULL;
        struct timeval                  right_now;
        long                            timediff;

        do_gettimeofday(&right_now);

        spin_lock(&svcpt->scp_req_lock);
        /* How long has the next entry been waiting? */
        if (ptlrpc_server_high_pending(svcpt, true))
                request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
        else if (ptlrpc_server_normal_pending(svcpt, true))
                request = ptlrpc_nrs_req_peek_nolock(svcpt, false);

        if (request == NULL) {
                spin_unlock(&svcpt->scp_req_lock);
                return 0;
        }

        timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time,
                                   NULL);
        spin_unlock(&svcpt->scp_req_lock);

        if ((timediff / ONE_MILLION) >
            (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
                CERROR("%s: unhealthy - request has been waiting %lds\n",
                       svcpt->scp_service->srv_name, timediff / ONE_MILLION);
                return -1;
        }

        return 0;
}

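/**
 * Service-wide health check: returns the first non-zero result from
 * ptlrpc_svcpt_health_check() across all partitions of \a svc, or 0 if
 * every partition is healthy (a NULL \a svc is trivially healthy).
 */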
int
ptlrpc_service_health_check(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        int                             i;

        if (svc == NULL)
                return 0;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                int rc = ptlrpc_svcpt_health_check(svcpt);

                if (rc != 0)
                        return rc;
        }
        return 0;
}
EXPORT_SYMBOL(ptlrpc_service_health_check);