LU-4499 nrs: adjust the order of REQ NRS initialization
[fs/lustre-release.git] / lustre / ptlrpc / service.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#include <linux/kthread.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lu_object.h>
#include <lnet/types.h>
#include "ptlrpc_internal.h"

/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
                "set non-zero to put pressure on request buffer pools");
CFS_MODULE_PARM(at_min, "i", int, 0644,
                "Adaptive timeout minimum (sec)");
CFS_MODULE_PARM(at_max, "i", int, 0644,
                "Adaptive timeout maximum (sec)");
CFS_MODULE_PARM(at_history, "i", int, 0644,
                "Adaptive timeouts remember the slowest event that took place "
                "within this period (sec)");
CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
                "How soon before an RPC deadline to send an early reply");
CFS_MODULE_PARM(at_extra, "i", int, 0644,
                "How much extra time to give with each early reply");


/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);

/** Holds a list of all PTLRPC services */
struct list_head ptlrpc_all_services;
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;

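/**
 * Allocate a request buffer descriptor from the CPT-local memory of
 * \a svcpt, together with its backing buffer of srv_buf_size bytes,
 * and add it to the partition's idle rqbd list.
 *
 * Returns the new descriptor, or NULL on allocation failure.
 */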
static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt);
        if (rqbd == NULL)
                return NULL;

        rqbd->rqbd_svcpt = svcpt;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        INIT_LIST_HEAD(&rqbd->rqbd_reqs);
        OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
                            svcpt->scp_cpt, svc->srv_buf_size);
        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE_PTR(rqbd);
                return NULL;
        }

        spin_lock(&svcpt->scp_lock);
        list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
        svcpt->scp_nrqbds_total++;
        spin_unlock(&svcpt->scp_lock);

        return rqbd;
}

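/**
 * Release an idle request buffer descriptor: unlink it from its
 * service partition and free both the backing buffer and the
 * descriptor itself.
 */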
static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;

        LASSERT(rqbd->rqbd_refcount == 0);
        LASSERT(list_empty(&rqbd->rqbd_reqs));

        spin_lock(&svcpt->scp_lock);
        list_del(&rqbd->rqbd_list);
        svcpt->scp_nrqbds_total--;
        spin_unlock(&svcpt->scp_lock);

        OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
        OBD_FREE_PTR(rqbd);
}

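/**
 * Allocate up to srv_nbuf_per_group new request buffers for \a svcpt,
 * stopping early if enough buffers are already posted. Only one thread
 * may allocate at a time (tracked by scp_rqbd_allocating). If \a post
 * is set, idle buffers are also posted to LNet afterwards.
 */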
static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                rc = 0;
        int                                i;

        if (svcpt->scp_rqbd_allocating)
                goto try_post;

        spin_lock(&svcpt->scp_lock);
        /* check again with lock */
        if (svcpt->scp_rqbd_allocating) {
                /* NB: we might allow more than one thread in the future */
                LASSERT(svcpt->scp_rqbd_allocating == 1);
                spin_unlock(&svcpt->scp_lock);
                goto try_post;
        }

        svcpt->scp_rqbd_allocating++;
        spin_unlock(&svcpt->scp_lock);


        for (i = 0; i < svc->srv_nbuf_per_group; i++) {
                /* NB: another thread might have recycled enough rqbds, we
                 * need to make sure it wouldn't over-allocate, see LU-1212. */
                if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
                        break;

                rqbd = ptlrpc_alloc_rqbd(svcpt);

                if (rqbd == NULL) {
                        CERROR("%s: Can't allocate request buffer\n",
                               svc->srv_name);
                        rc = -ENOMEM;
                        break;
                }
        }

        spin_lock(&svcpt->scp_lock);

        LASSERT(svcpt->scp_rqbd_allocating == 1);
        svcpt->scp_rqbd_allocating--;

        spin_unlock(&svcpt->scp_lock);

        CDEBUG(D_RPCTRACE,
               "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
               svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
               svcpt->scp_nrqbds_total, rc);

 try_post:
        if (post && rc == 0)
                rc = ptlrpc_server_post_idle_rqbds(svcpt);

        return rc;
}

/**
 * Part of Rep-Ack logic.
 * Puts a lock and its mode into the reply state associated with the
 * request reply.
 */
void
ptlrpc_save_lock(struct ptlrpc_request *req,
                 struct lustre_handle *lock, int mode, int no_ack)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT(rs != NULL);
        LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);

        if (req->rq_export->exp_disconnected) {
                ldlm_lock_decref(lock, mode);
        } else {
                idx = rs->rs_nlocks++;
                rs->rs_locks[idx] = *lock;
                rs->rs_modes[idx] = mode;
                rs->rs_difficult = 1;
                rs->rs_no_ack = !!no_ack;
        }
}
EXPORT_SYMBOL(ptlrpc_save_lock);


struct ptlrpc_hr_partition;

struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
        spinlock_t                      hrt_lock;
        wait_queue_head_t               hrt_waitq;
        struct list_head                hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition      *hrt_partition;
};

struct ptlrpc_hr_partition {
        /* # of started threads */
        atomic_t                        hrp_nstarted;
        /* # of stopped threads */
        atomic_t                        hrp_nstopped;
        /* cpu partition id */
        int                             hrp_cpt;
        /* round-robin rotor for choosing thread */
        int                             hrp_rotor;
        /* total number of threads on this partition */
        int                             hrp_nthrs;
        /* threads table */
        struct ptlrpc_hr_thread         *hrp_thrs;
};

#define HRT_RUNNING 0
#define HRT_STOPPING 1

struct ptlrpc_hr_service {
        /* CPU partition table, it's just cfs_cpt_table for now */
        struct cfs_cpt_table            *hr_cpt_table;
        /** controller sleep waitq */
        wait_queue_head_t               hr_waitq;
        unsigned int                    hr_stopping;
        /** roundrobin rotor for non-affinity service */
        unsigned int                    hr_rotor;
        /* partition data */
        struct ptlrpc_hr_partition      **hr_partitions;
};

struct rs_batch {
        struct list_head                rsb_replies;
        unsigned int                    rsb_n_replies;
        struct ptlrpc_service_part      *rsb_svcpt;
};

/** reply handling service. */
static struct ptlrpc_hr_service         ptlrpc_hr;

/**
 * maximum number of replies scheduled in one batch
 */
#define MAX_SCHEDULED 256

/**
 * Initialize a reply batch.
 *
 * \param b batch
 */
static void rs_batch_init(struct rs_batch *b)
{
        memset(b, 0, sizeof *b);
        INIT_LIST_HEAD(&b->rsb_replies);
}

/**
 * Choose an hr thread to dispatch requests to.
 */
static struct ptlrpc_hr_thread *
ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_hr_partition      *hrp;
        unsigned int                    rotor;

        if (svcpt->scp_cpt >= 0 &&
            svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
                /* directly match partition */
                hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];

        } else {
                rotor = ptlrpc_hr.hr_rotor++;
                rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);

                hrp = ptlrpc_hr.hr_partitions[rotor];
        }

        rotor = hrp->hrp_rotor++;
        return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
}

/**
 * Dispatch all replies accumulated in the batch to one of the
 * dedicated reply handling threads.
 *
 * \param b batch
 */
static void rs_batch_dispatch(struct rs_batch *b)
{
        if (b->rsb_n_replies != 0) {
                struct ptlrpc_hr_thread *hrt;

                hrt = ptlrpc_hr_select(b->rsb_svcpt);

                spin_lock(&hrt->hrt_lock);
                list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
                spin_unlock(&hrt->hrt_lock);

                wake_up(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
        }
}

/**
 * Add a reply to a batch.
 * Add one reply object to a batch; schedule the batched replies on
 * overload.
 *
 * \param b batch
 * \param rs reply
 */
static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

        if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
                if (b->rsb_svcpt != NULL) {
                        rs_batch_dispatch(b);
                        spin_unlock(&b->rsb_svcpt->scp_rep_lock);
                }
                spin_lock(&svcpt->scp_rep_lock);
                b->rsb_svcpt = svcpt;
        }
        spin_lock(&rs->rs_lock);
        rs->rs_scheduled_ever = 1;
        if (rs->rs_scheduled == 0) {
                list_move(&rs->rs_list, &b->rsb_replies);
                rs->rs_scheduled = 1;
                b->rsb_n_replies++;
        }
        rs->rs_committed = 1;
        spin_unlock(&rs->rs_lock);
}

/**
 * Reply batch finalization.
 * Dispatch the remaining replies from the batch and release the
 * remaining spinlock.
 *
 * \param b batch
 */
static void rs_batch_fini(struct rs_batch *b)
{
        if (b->rsb_svcpt != NULL) {
                rs_batch_dispatch(b);
                spin_unlock(&b->rsb_svcpt->scp_rep_lock);
        }
}

#define DECLARE_RS_BATCH(b)     struct rs_batch b


/**
 * Put reply state into a queue for processing because we received
 * ACK from the client
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_hr_thread *hrt;
        ENTRY;

        LASSERT(list_empty(&rs->rs_list));

        hrt = ptlrpc_hr_select(rs->rs_svcpt);

        spin_lock(&hrt->hrt_lock);
        list_add_tail(&rs->rs_list, &hrt->hrt_queue);
        spin_unlock(&hrt->hrt_lock);

        wake_up(&hrt->hrt_waitq);
        EXIT;
}

void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
        assert_spin_locked(&rs->rs_lock);
        LASSERT(rs->rs_difficult);
        rs->rs_scheduled_ever = 1;  /* flag any notification attempt */

        if (rs->rs_scheduled) {     /* being set up or already notified */
                EXIT;
                return;
        }

        rs->rs_scheduled = 1;
        list_del_init(&rs->rs_list);
        ptlrpc_dispatch_difficult_reply(rs);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);

void ptlrpc_commit_replies(struct obd_export *exp)
{
        struct ptlrpc_reply_state *rs, *nxt;
        DECLARE_RS_BATCH(batch);
        ENTRY;

        rs_batch_init(&batch);
        /* Find any replies that have been committed and get their service
         * to attend to completing them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock(&exp->exp_uncommitted_replies_lock);
        list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
                                 rs_obd_list) {
                LASSERT(rs->rs_difficult);
                /* VBR: per-export last_committed */
                LASSERT(rs->rs_export);
                if (rs->rs_transno <= exp->exp_last_committed) {
                        list_del_init(&rs->rs_obd_list);
                        rs_batch_add(&batch, rs);
                }
        }
        spin_unlock(&exp->exp_uncommitted_replies_lock);
        rs_batch_fini(&batch);
        EXIT;
}

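/**
 * Post all idle request buffers of \a svcpt to LNet.
 *
 * Returns 1 if at least one buffer was posted, 0 if there was nothing
 * to post, and -1 if a buffer failed to register (that buffer is put
 * back on the idle list).
 */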
static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                rc;
        int                                posted = 0;

        for (;;) {
                spin_lock(&svcpt->scp_lock);

                if (list_empty(&svcpt->scp_rqbd_idle)) {
                        spin_unlock(&svcpt->scp_lock);
                        return posted;
                }

                rqbd = list_entry(svcpt->scp_rqbd_idle.next,
                                  struct ptlrpc_request_buffer_desc,
                                  rqbd_list);
                list_del(&rqbd->rqbd_list);

                /* assume we will post successfully */
                svcpt->scp_nrqbds_posted++;
                list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);

                spin_unlock(&svcpt->scp_lock);

                rc = ptlrpc_register_rqbd(rqbd);
                if (rc != 0)
                        break;

                posted = 1;
        }

        spin_lock(&svcpt->scp_lock);

        svcpt->scp_nrqbds_posted--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);

        /* Don't complain if no request buffers are posted right now; LNET
         * won't drop requests because we set the portal lazy! */

        spin_unlock(&svcpt->scp_lock);

        return -1;
}

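/**
 * Adaptive timeout timer callback: flag \a svcpt (passed through
 * \a castmeharder) for an AT check and wake a service thread so that
 * ptlrpc_at_check_timed() gets called.
 */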
static void ptlrpc_at_timer(unsigned long castmeharder)
{
        struct ptlrpc_service_part *svcpt;

        svcpt = (struct ptlrpc_service_part *)castmeharder;

        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
        wake_up(&svcpt->scp_waitq);
}

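/**
 * Estimate and validate the number of threads to start for \a svc
 * based on \a conf, storing the per-CPT initial and maximum counts in
 * srv_nthrs_cpt_init and srv_nthrs_cpt_limit respectively.
 */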
static void
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                             struct ptlrpc_service_conf *conf)
{
        struct ptlrpc_service_thr_conf  *tc = &conf->psc_thr;
        unsigned                        init;
        unsigned                        total;
        unsigned                        nthrs;
        int                             weight;

        /*
         * Common code for estimating & validating the number of threads.
         * A CPT-affinity service can have a per-CPT thread pool instead
         * of a global thread pool, which means the user might not always
         * get the number of threads specified in conf::tc_nthrs_user,
         * even when it is set. This is because we need to validate the
         * thread count for each CPT to guarantee each pool will have
         * enough threads to keep the service healthy.
         */
        init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
        init = max_t(int, init, tc->tc_nthrs_init);

        /* NB: please see comments in lustre_lnet.h for definition
         * details of these members */
        LASSERT(tc->tc_nthrs_max != 0);

        if (tc->tc_nthrs_user != 0) {
                /* In case there is a reason to test a service with many
                 * threads, we give a less strict check here; it can
                 * be up to 8 * nthrs_max */
                total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
                nthrs = total / svc->srv_ncpts;
                init  = max(init, nthrs);
                goto out;
        }

        total = tc->tc_nthrs_max;
        if (tc->tc_nthrs_base == 0) {
                /* don't care about the base number of threads per
                 * partition; this is mostly for non-affinity services */
                nthrs = total / svc->srv_ncpts;
                goto out;
        }

        nthrs = tc->tc_nthrs_base;
        if (svc->srv_ncpts == 1) {
                int     i;

                /* NB: Increase the base number if it's a single partition
                 * and the total number of cores/HTs is at least 4. The
                 * result will always be < 2 * nthrs_base */
                weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
                for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
                            (tc->tc_nthrs_base >> i) != 0; i++)
                        nthrs += tc->tc_nthrs_base >> i;
        }

        if (tc->tc_thr_factor != 0) {
                int       factor = tc->tc_thr_factor;
                const int fade = 4;

                /*
                 * The user wants to increase the number of threads for
                 * each CPU core/HT; most likely the factor is larger than
                 * one thread/core because service threads are expected to
                 * be blocked on locks or waiting for IO.
                 */
                /*
                 * Amdahl's law says that adding processors doesn't give
                 * a linear increase in parallelism, so it's nonsense to
                 * have too many threads no matter how many cores/HTs
                 * there are.
                 */
                if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
                        /* depress thread factor for hyper-threads */
                        factor = factor - (factor >> 1) + (factor >> 3);
                }

                weight = cfs_cpt_weight(svc->srv_cptable, 0);
                LASSERT(weight > 0);

                for (; factor > 0 && weight > 0; factor--, weight -= fade)
                        nthrs += min(weight, fade) * factor;
        }

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                nthrs = max(tc->tc_nthrs_base,
                            tc->tc_nthrs_max / svc->srv_ncpts);
        }
 out:
        nthrs = max(nthrs, tc->tc_nthrs_init);
        svc->srv_nthrs_cpt_limit = nthrs;
        svc->srv_nthrs_cpt_init = init;

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
                       "than the given soft limit (%d)\n",
                       svc->srv_name, nthrs * svc->srv_ncpts,
                       tc->tc_nthrs_max);
        }
}

/**
 * Initialize percpt data for a service
 */
static int
ptlrpc_service_part_init(struct ptlrpc_service *svc,
                         struct ptlrpc_service_part *svcpt, int cpt)
{
        struct ptlrpc_at_array  *array;
        int                     size;
        int                     index;
        int                     rc;

        svcpt->scp_cpt = cpt;
        INIT_LIST_HEAD(&svcpt->scp_threads);

        /* rqbd and incoming request queue */
        spin_lock_init(&svcpt->scp_lock);
        INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        INIT_LIST_HEAD(&svcpt->scp_req_incoming);
        init_waitqueue_head(&svcpt->scp_waitq);
        /* history request & rqbd list */
        INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
        INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);

        /* active requests and hp requests */
        spin_lock_init(&svcpt->scp_req_lock);

        /* reply states */
        spin_lock_init(&svcpt->scp_rep_lock);
        INIT_LIST_HEAD(&svcpt->scp_rep_active);
        INIT_LIST_HEAD(&svcpt->scp_rep_idle);
        init_waitqueue_head(&svcpt->scp_rep_waitq);
        atomic_set(&svcpt->scp_nreps_difficult, 0);

        /* adaptive timeout */
        spin_lock_init(&svcpt->scp_at_lock);
        array = &svcpt->scp_at_array;

        size = at_est2timeout(at_max);
        array->paa_size     = size;
        array->paa_count    = 0;
        array->paa_deadline = -1;

        /* allocate memory for scp_at_array (ptlrpc_at_array) */
        OBD_CPT_ALLOC(array->paa_reqs_array,
                      svc->srv_cptable, cpt, sizeof(struct list_head) * size);
        if (array->paa_reqs_array == NULL)
                return -ENOMEM;

        for (index = 0; index < size; index++)
                INIT_LIST_HEAD(&array->paa_reqs_array[index]);

        OBD_CPT_ALLOC(array->paa_reqs_count,
                      svc->srv_cptable, cpt, sizeof(__u32) * size);
        if (array->paa_reqs_count == NULL)
                goto failed;

        cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
        /* At SOW, service time should be quick; 10s seems generous. If client
         * timeout is less than this, we'll be sending an early reply. */
        at_init(&svcpt->scp_at_estimate, 10, 0);

        /* assign this before calling ptlrpc_grow_req_bufs */
        svcpt->scp_service = svc;
        /* Now allocate the request buffers, but don't post them now */
        rc = ptlrpc_grow_req_bufs(svcpt, 0);
        /* We shouldn't be under memory pressure at startup, so
         * fail if we can't allocate all our buffers at this time. */
        if (rc != 0)
                goto failed;

        return 0;

 failed:
        if (array->paa_reqs_count != NULL) {
                OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
                array->paa_reqs_count = NULL;
        }

        if (array->paa_reqs_array != NULL) {
                OBD_FREE(array->paa_reqs_array,
                         sizeof(struct list_head) * array->paa_size);
                array->paa_reqs_array = NULL;
        }

        return -ENOMEM;
}

/**
 * Initialize service on a given portal.
 * This includes starting service threads, allocating and posting rqbds,
 * and so on.
 */
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
                        struct proc_dir_entry *proc_entry)
{
        struct ptlrpc_service_cpt_conf  *cconf = &conf->psc_cpt;
        struct ptlrpc_service           *service;
        struct ptlrpc_service_part      *svcpt;
        struct cfs_cpt_table            *cptable;
        __u32                           *cpts = NULL;
        int                             ncpts;
        int                             cpt;
        int                             rc;
        int                             i;
        ENTRY;

        LASSERT(conf->psc_buf.bc_nbufs > 0);
        LASSERT(conf->psc_buf.bc_buf_size >=
                conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
        LASSERT(conf->psc_thr.tc_ctx_tags != 0);

        cptable = cconf->cc_cptable;
        if (cptable == NULL)
                cptable = cfs_cpt_table;

        if (!conf->psc_thr.tc_cpu_affinity) {
                ncpts = 1;
        } else {
                ncpts = cfs_cpt_number(cptable);
                if (cconf->cc_pattern != NULL) {
                        struct cfs_expr_list    *el;

                        rc = cfs_expr_list_parse(cconf->cc_pattern,
                                                 strlen(cconf->cc_pattern),
                                                 0, ncpts - 1, &el);
                        if (rc != 0) {
                                CERROR("%s: invalid CPT pattern string: %s\n",
                                       conf->psc_name, cconf->cc_pattern);
                                RETURN(ERR_PTR(-EINVAL));
                        }

                        rc = cfs_expr_list_values(el, ncpts, &cpts);
                        cfs_expr_list_free(el);
                        if (rc <= 0) {
                                CERROR("%s: failed to parse CPT array %s: %d\n",
                                       conf->psc_name, cconf->cc_pattern, rc);
                                if (cpts != NULL)
                                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                                RETURN(ERR_PTR(rc < 0 ? rc : -EINVAL));
                        }
                        ncpts = rc;
                }
        }

        OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts]));
        if (service == NULL) {
                if (cpts != NULL)
                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                RETURN(ERR_PTR(-ENOMEM));
        }

        service->srv_cptable            = cptable;
        service->srv_cpts               = cpts;
        service->srv_ncpts              = ncpts;

        service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
        while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
                service->srv_cpt_bits++;

        /* public members */
        spin_lock_init(&service->srv_lock);
        service->srv_name               = conf->psc_name;
        service->srv_watchdog_factor    = conf->psc_watchdog_factor;
        INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */

        /* buffer configuration */
        service->srv_nbuf_per_group     = test_req_buffer_pressure ?
                                          1 : conf->psc_buf.bc_nbufs;
        service->srv_max_req_size       = conf->psc_buf.bc_req_max_size +
                                          SPTLRPC_MAX_PAYLOAD;
        service->srv_buf_size           = conf->psc_buf.bc_buf_size;
        service->srv_rep_portal         = conf->psc_buf.bc_rep_portal;
        service->srv_req_portal         = conf->psc_buf.bc_req_portal;

        /* Increase max reply size to next power of two */
        service->srv_max_reply_size = 1;
        while (service->srv_max_reply_size <
               conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
                service->srv_max_reply_size <<= 1;

        service->srv_thread_name        = conf->psc_thr.tc_thr_name;
        service->srv_ctx_tags           = conf->psc_thr.tc_ctx_tags;
        service->srv_hpreq_ratio        = PTLRPC_SVC_HP_RATIO;
        service->srv_ops                = conf->psc_ops;

        for (i = 0; i < ncpts; i++) {
                if (!conf->psc_thr.tc_cpu_affinity)
                        cpt = CFS_CPT_ANY;
                else
                        cpt = cpts != NULL ? cpts[i] : i;

                OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt));
                if (svcpt == NULL)
                        GOTO(failed, rc = -ENOMEM);

                service->srv_parts[i] = svcpt;
                rc = ptlrpc_service_part_init(service, svcpt, cpt);
                if (rc != 0)
                        GOTO(failed, rc);
        }

        ptlrpc_server_nthreads_check(service, conf);

        rc = LNetSetLazyPortal(service->srv_req_portal);
        LASSERT(rc == 0);

        mutex_lock(&ptlrpc_all_services_mutex);
        list_add(&service->srv_list, &ptlrpc_all_services);
        mutex_unlock(&ptlrpc_all_services_mutex);

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        rc = ptlrpc_service_nrs_setup(service);
        if (rc != 0)
                GOTO(failed, rc);

        CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
               service->srv_name, service->srv_req_portal);

        rc = ptlrpc_start_threads(service);
        if (rc != 0) {
                CERROR("Failed to start threads for service %s: %d\n",
                       service->srv_name, rc);
                GOTO(failed, rc);
        }

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        RETURN(ERR_PTR(rc));
}
EXPORT_SYMBOL(ptlrpc_register_service);
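
/*
 * Example registration (an illustrative sketch only: the portal numbers,
 * buffer sizes, thread counts and "example_handler" below are hypothetical
 * and not taken from any real Lustre service):
 *
 *      static struct ptlrpc_service_conf conf = {
 *              .psc_name               = "example_svc",
 *              .psc_watchdog_factor    = 1,
 *              .psc_buf                = {
 *                      .bc_nbufs               = 64,
 *                      .bc_buf_size            = 4096 + SPTLRPC_MAX_PAYLOAD,
 *                      .bc_req_max_size        = 4096,
 *                      .bc_rep_max_size        = 4096,
 *                      .bc_req_portal          = 30,
 *                      .bc_rep_portal          = 31,
 *              },
 *              .psc_thr                = {
 *                      .tc_thr_name            = "example",
 *                      .tc_nthrs_init          = 2,
 *                      .tc_nthrs_max           = 16,
 *                      .tc_ctx_tags            = LCT_MD_THREAD,
 *              },
 *              .psc_ops                = {
 *                      .so_req_handler         = example_handler,
 *              },
 *      };
 *      struct ptlrpc_service *svc;
 *
 *      svc = ptlrpc_register_service(&conf, proc_entry);
 *      if (IS_ERR(svc))
 *              RETURN(PTR_ERR(svc));
 */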

/**
 * Actually free the request; must be called without holding svc_lock.
 * Note: it is the caller's responsibility to unlink req->rq_list.
 */
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
        LASSERT(atomic_read(&req->rq_refcount) == 0);
        LASSERT(list_empty(&req->rq_timed_list));

        /* DEBUG_REQ() assumes the reply state of a request with a valid
         * ref will not be destroyed until that reference is dropped. */
        ptlrpc_req_drop_rs(req);

        sptlrpc_svc_ctx_decref(req);

        if (req != &req->rq_rqbd->rqbd_req) {
                /* NB request buffers use an embedded
                 * req if the incoming req unlinked the
                 * MD; this isn't one of them! */
                ptlrpc_request_cache_free(req);
        }
}

/**
 * Drop a reference count of the request. If it reaches 0, we either
 * put it into the history list or free it immediately.
 */
void ptlrpc_server_drop_request(struct ptlrpc_request *req)
{
        struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *svc = svcpt->scp_service;
        int                                refcount;
        struct list_head                  *tmp;
        struct list_head                  *nxt;

        if (!atomic_dec_and_test(&req->rq_refcount))
                return;

        if (req->rq_session.lc_state == LCS_ENTERED) {
                lu_context_exit(&req->rq_session);
                lu_context_fini(&req->rq_session);
        }

        if (req->rq_at_linked) {
                spin_lock(&svcpt->scp_at_lock);
                /* recheck with lock, in case it's unlinked by
                 * ptlrpc_at_check_timed() */
                if (likely(req->rq_at_linked))
                        ptlrpc_at_remove_timed(req);
                spin_unlock(&svcpt->scp_at_lock);
        }

        LASSERT(list_empty(&req->rq_timed_list));

        /* finalize request */
        if (req->rq_export) {
                class_export_put(req->rq_export);
                req->rq_export = NULL;
        }

        spin_lock(&svcpt->scp_lock);

        list_add(&req->rq_list, &rqbd->rqbd_reqs);

        refcount = --(rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle: add to history */
                list_del(&rqbd->rqbd_list);

                list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
                svcpt->scp_hist_nrqbds++;

                /* cull some history?
                 * I expect only about 1 or 2 rqbds need to be recycled here */
                while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
                        rqbd = list_entry(svcpt->scp_hist_rqbds.next,
                                          struct ptlrpc_request_buffer_desc,
                                          rqbd_list);

                        list_del(&rqbd->rqbd_list);
                        svcpt->scp_hist_nrqbds--;

                        /* remove rqbd's reqs from svc's req history while
                         * I've got the service lock */
                        list_for_each(tmp, &rqbd->rqbd_reqs) {
                                req = list_entry(tmp, struct ptlrpc_request,
                                                 rq_list);
                                /* Track the highest culled req seq */
                                if (req->rq_history_seq >
                                    svcpt->scp_hist_seq_culled) {
                                        svcpt->scp_hist_seq_culled =
                                                req->rq_history_seq;
                                }
                                list_del(&req->rq_history_list);
                        }

                        spin_unlock(&svcpt->scp_lock);

                        list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
                                req = list_entry(rqbd->rqbd_reqs.next,
                                                 struct ptlrpc_request,
                                                 rq_list);
                                list_del(&req->rq_list);
                                ptlrpc_server_free_request(req);
                        }

                        spin_lock(&svcpt->scp_lock);
                        /*
                         * now all reqs including the embedded req have been
                         * disposed of; schedule the request buffer for reuse.
                         */
                        LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
                        list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
                }

                spin_unlock(&svcpt->scp_lock);
        } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
                /* If we are low on memory, we are not interested in history */
                list_del(&req->rq_list);
                list_del_init(&req->rq_history_list);

                /* Track the highest culled req seq */
                if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
                        svcpt->scp_hist_seq_culled = req->rq_history_seq;

                spin_unlock(&svcpt->scp_lock);

                ptlrpc_server_free_request(req);
        } else {
                spin_unlock(&svcpt->scp_lock);
        }
}

/** Change request export and move hp request from old export to new */
void ptlrpc_request_change_export(struct ptlrpc_request *req,
                                  struct obd_export *export)
{
        if (req->rq_export != NULL) {
                LASSERT(!list_empty(&req->rq_exp_list));
                /* remove rq_exp_list from last export */
                spin_lock_bh(&req->rq_export->exp_rpc_lock);
                list_del_init(&req->rq_exp_list);
                spin_unlock_bh(&req->rq_export->exp_rpc_lock);
                /* export has one reference already, so it's safe to
                 * add req to export queue here and get another
                 * reference for request later */
                spin_lock_bh(&export->exp_rpc_lock);
                if (req->rq_ops != NULL) /* hp request */
                        list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
                else
                        list_add(&req->rq_exp_list, &export->exp_reg_rpcs);
                spin_unlock_bh(&export->exp_rpc_lock);

                class_export_rpc_dec(req->rq_export);
                class_export_put(req->rq_export);
        }

        /* request takes one export refcount */
        req->rq_export = class_export_get(export);
        class_export_rpc_inc(export);

        return;
}

/**
 * Finish a request: stop sending more early replies, and release
 * the request.
 */
static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
                                         struct ptlrpc_request *req)
{
        ptlrpc_server_hpreq_fini(req);

        ptlrpc_server_drop_request(req);
}

/**
 * Finish an active request: stop sending more early replies, and release
 * the request. Should be called after we have finished handling the
 * request.
 */
static void ptlrpc_server_finish_active_request(
                                        struct ptlrpc_service_part *svcpt,
                                        struct ptlrpc_request *req)
{
        spin_lock(&svcpt->scp_req_lock);
        ptlrpc_nrs_req_stop_nolock(req);
        svcpt->scp_nreqs_active--;
        if (req->rq_hp)
                svcpt->scp_nhreqs_active--;
        spin_unlock(&svcpt->scp_req_lock);

        ptlrpc_nrs_req_finalize(req);

        if (req->rq_export != NULL)
                class_export_rpc_dec(req->rq_export);

        ptlrpc_server_finish_request(svcpt, req);
}

/**
 * This function makes sure dead exports are evicted in a timely manner.
 * It is only called when some export receives a message (i.e., the
 * network is up).
 */
void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
{
        struct obd_export *oldest_exp;
        time_t oldest_time, new_time;

        ENTRY;

        LASSERT(exp);

        /* Compensate for slow machines, etc, by faking our request time
           into the future.  Although this can break the strict time-ordering
           of the list, we can be really lazy here - we don't have to evict
           at the exact right moment.  Eventually, all silent exports
           will make it to the top of the list. */

        /* Ignore renewals of one second or less. */
        new_time = cfs_time_current_sec() + extra_delay;
        if (exp->exp_last_request_time + 1 /* second */ >= new_time)
                RETURN_EXIT;

        exp->exp_last_request_time = new_time;

        /* exports may get disconnected from the chain even though the
           export has references, so we must keep the spin lock while
           manipulating the lists */
        spin_lock(&exp->exp_obd->obd_dev_lock);

        if (list_empty(&exp->exp_obd_chain_timed)) {
                /* this one is not timed */
                spin_unlock(&exp->exp_obd->obd_dev_lock);
                RETURN_EXIT;
        }

        list_move_tail(&exp->exp_obd_chain_timed,
                       &exp->exp_obd->obd_exports_timed);

        oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
                                struct obd_export, exp_obd_chain_timed);
        oldest_time = oldest_exp->exp_last_request_time;
        spin_unlock(&exp->exp_obd->obd_dev_lock);

        if (exp->exp_obd->obd_recovering) {
                /* be nice to everyone during recovery */
                EXIT;
                return;
        }

        /* Note - racing to start/reset the obd_eviction timer is safe */
        if (exp->exp_obd->obd_eviction_timer == 0) {
                /* Check if the oldest entry is expired. */
                if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
                                              extra_delay)) {
                        /* We need a second timer, in case the net was down and
                         * it just came back. Since the pinger may skip every
                         * other PING_INTERVAL (see note in ptlrpc_pinger_main),
                         * we better wait for 3. */
                        exp->exp_obd->obd_eviction_timer =
                                cfs_time_current_sec() + 3 * PING_INTERVAL;
                        CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
                               exp->exp_obd->obd_name,
                               obd_export_nid2str(oldest_exp), oldest_time);
                }
        } else {
                if (cfs_time_current_sec() >
                    (exp->exp_obd->obd_eviction_timer + extra_delay)) {
                        /* The evictor won't evict anyone who we've heard from
                         * recently, so we don't have to check before we start
                         * it. */
                        if (!ping_evictor_wake(exp))
                                exp->exp_obd->obd_eviction_timer = 0;
                }
        }

        EXIT;
}

/**
 * Sanity check request \a req.
 * Return 0 if all is ok, error code otherwise.
 */
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
        struct obd_device *obd = req->rq_export->exp_obd;
        int rc = 0;

        if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
                     req->rq_export->exp_conn_cnt)) {
                DEBUG_REQ(D_RPCTRACE, req,
                          "DROPPING req from old connection %d < %d",
                          lustre_msg_get_conn_cnt(req->rq_reqmsg),
                          req->rq_export->exp_conn_cnt);
                return -EEXIST;
        }
        if (unlikely(obd == NULL || obd->obd_fail)) {
                /* Failing over, don't handle any more reqs,
                 * send error response instead. */
                CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
                       req, (obd != NULL) ? obd->obd_name : "unknown");
                rc = -ENODEV;
        } else if (lustre_msg_get_flags(req->rq_reqmsg) &
                   (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
                   !obd->obd_recovering) {
                DEBUG_REQ(D_ERROR, req,
                          "Invalid replay without recovery");
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
                   !obd->obd_recovering) {
                DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
                          LPU64" without recovery",
                          lustre_msg_get_transno(req->rq_reqmsg));
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        }

        if (unlikely(rc < 0)) {
                req->rq_status = rc;
                ptlrpc_error(req);
        }
        return rc;
}

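/**
 * Arm the adaptive timeout timer of \a svcpt to fire at_early_margin
 * seconds before the earliest request deadline, disarming it when no
 * timed requests remain. If the deadline is already due, run
 * ptlrpc_at_timer() directly.
 */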
static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        __s32 next;

        if (array->paa_count == 0) {
                cfs_timer_disarm(&svcpt->scp_at_timer);
                return;
        }

        /* Set timer for closest deadline */
        next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
                       at_early_margin);
        if (next <= 0) {
                ptlrpc_at_timer((unsigned long)svcpt);
        } else {
                cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
                       svcpt->scp_service->srv_name, next);
        }
}

/* Add rpc to early reply check list */
static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        struct ptlrpc_request *rq = NULL;
        __u32 index;

        if (AT_OFF)
                return 0;

        if (req->rq_no_reply)
                return 0;

        if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
                return -ENOSYS;

        spin_lock(&svcpt->scp_at_lock);
        LASSERT(list_empty(&req->rq_timed_list));

        index = (unsigned long)req->rq_deadline % array->paa_size;
        if (array->paa_reqs_count[index] > 0) {
                /* latest rpcs will have the latest deadlines in the list,
                 * so search backward. */
                list_for_each_entry_reverse(rq,
                                            &array->paa_reqs_array[index],
                                            rq_timed_list) {
                        if (req->rq_deadline >= rq->rq_deadline) {
                                list_add(&req->rq_timed_list,
                                         &rq->rq_timed_list);
                                break;
                        }
                }
        }

        /* Add the request at the head of the list */
        if (list_empty(&req->rq_timed_list))
                list_add(&req->rq_timed_list,
                         &array->paa_reqs_array[index]);

        spin_lock(&req->rq_lock);
        req->rq_at_linked = 1;
        spin_unlock(&req->rq_lock);
        req->rq_at_index = index;
        array->paa_reqs_count[index]++;
        array->paa_count++;
        if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
                array->paa_deadline = req->rq_deadline;
                ptlrpc_at_set_timer(svcpt);
        }
        spin_unlock(&svcpt->scp_at_lock);

        return 0;
}

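/**
 * Remove \a req from the early reply list of its service partition and
 * update the per-slot and total request counters.
 */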
1237 static void
1238 ptlrpc_at_remove_timed(struct ptlrpc_request *req)
1239 {
1240         struct ptlrpc_at_array *array;
1241
1242         array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
1243
1244         /* NB: must call with hold svcpt::scp_at_lock */
1245         LASSERT(!list_empty(&req->rq_timed_list));
1246         list_del_init(&req->rq_timed_list);
1247
1248         spin_lock(&req->rq_lock);
1249         req->rq_at_linked = 0;
1250         spin_unlock(&req->rq_lock);
1251
1252         array->paa_reqs_count[req->rq_at_index]--;
1253         array->paa_count--;
1254 }
1255
1256 /*
1257  * Attempt to extend the request deadline by sending an early reply to the
1258  * client.
1259  */
1260 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
1261 {
1262         struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
1263         struct ptlrpc_request *reqcopy;
1264         struct lustre_msg *reqmsg;
1265         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
1266         time_t  newdl;
1267         int rc;
1268
1269         ENTRY;
1270
1271         if (CFS_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_RECONNECT)) {
1272                 /* don't send early reply */
1273                 RETURN(1);
1274         }
1275
1276         /* deadline is when the client expects us to reply, margin is the
1277            difference between clients' and servers' expectations */
1278         DEBUG_REQ(D_ADAPTTO, req,
1279                   "%ssending early reply (deadline %+lds, margin %+lds) for "
1280                   "%d+%d", AT_OFF ? "AT off - not " : "",
1281                   olddl, olddl - at_get(&svcpt->scp_at_estimate),
1282                   at_get(&svcpt->scp_at_estimate), at_extra);
1283
1284         if (AT_OFF)
1285                 RETURN(0);
1286
1287         if (olddl < 0) {
1288                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
1289                           "not sending early reply. Consider increasing "
1290                           "at_early_margin (%d)?", olddl, at_early_margin);
1291
1292                 /* Return an error so we're not re-added to the timed list. */
1293                 RETURN(-ETIMEDOUT);
1294         }
1295
1296         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
1297                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
1298                           "but no AT support");
1299                 RETURN(-ENOSYS);
1300         }
1301
1302         if (req->rq_export &&
1303             lustre_msg_get_flags(req->rq_reqmsg) &
1304             (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
1305                 /* During recovery, we don't want to send too many early
1306                  * replies, but on the other hand we want to make sure the
1307                  * client has enough time to resend if the rpc is lost. So
1308                  * during the recovery period send at least 4 early replies,
1309                  * spacing them every at_extra if we can. at_estimate should
1310                  * always equal this fixed value during recovery. */
1311                 /* Don't account request processing time into AT history
1312                  * during recovery, it is not service time we need but
1313                  * includes also waiting time for recovering clients */
1314                 newdl = cfs_time_current_sec() + min(at_extra,
1315                         req->rq_export->exp_obd->obd_recovery_timeout / 4);
1316         } else {
1317                 /* We want to extend the request deadline by at_extra seconds,
1318                  * so we set our service estimate to reflect how much time has
1319                  * passed since this request arrived plus an additional
1320                  * at_extra seconds. The client will calculate the new deadline
1321                  * based on this service estimate (plus some additional time to
1322                  * account for network latency). See ptlrpc_at_recv_early_reply
1323                  */
1324                 at_measured(&svcpt->scp_at_estimate, at_extra +
1325                             cfs_time_current_sec() -
1326                             req->rq_arrival_time.tv_sec);
1327                 newdl = req->rq_arrival_time.tv_sec +
1328                         at_get(&svcpt->scp_at_estimate);
1329         }
1330
1331         /* Check to see if we've actually increased the deadline -
1332          * we may be past adaptive_max */
1333         if (req->rq_deadline >= newdl) {
1334                 DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
1335                           "(%ld/%ld), not sending early reply\n",
1336                           olddl, newdl - cfs_time_current_sec());
1337                 RETURN(-ETIMEDOUT);
1338         }
1339
1340         reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
1341         if (reqcopy == NULL)
1342                 RETURN(-ENOMEM);
1343         OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
1344         if (!reqmsg)
1345                 GOTO(out_free, rc = -ENOMEM);
1346
1347         *reqcopy = *req;
1348         reqcopy->rq_reply_state = NULL;
1349         reqcopy->rq_rep_swab_mask = 0;
1350         reqcopy->rq_pack_bulk = 0;
1351         reqcopy->rq_pack_udesc = 0;
1352         reqcopy->rq_packed_final = 0;
1353         sptlrpc_svc_ctx_addref(reqcopy);
1354         /* We only need the reqmsg for the magic */
1355         reqcopy->rq_reqmsg = reqmsg;
1356         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
1357
1358         /*
1359          * tgt_brw_read() and tgt_brw_write() may have decided not to reply.
1360          * Without this check, we would fail the rq_no_reply assertion in
1361          * ptlrpc_send_reply().
1362          */
1363         if (reqcopy->rq_no_reply)
1364                 GOTO(out, rc = -ETIMEDOUT);
1365
1366         LASSERT(atomic_read(&req->rq_refcount));
1367         /** if it is last refcount then early reply isn't needed */
1368         if (atomic_read(&req->rq_refcount) == 1) {
1369                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
1370                           "abort sending early reply\n");
1371                 GOTO(out, rc = -EINVAL);
1372         }
1373
1374         /* Connection ref */
1375         reqcopy->rq_export = class_conn2export(
1376                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
1377         if (reqcopy->rq_export == NULL)
1378                 GOTO(out, rc = -ENODEV);
1379
1380         /* RPC ref */
1381         class_export_rpc_inc(reqcopy->rq_export);
1382         if (reqcopy->rq_export->exp_obd &&
1383             reqcopy->rq_export->exp_obd->obd_fail)
1384                 GOTO(out_put, rc = -ENODEV);
1385
1386         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1387         if (rc)
1388                 GOTO(out_put, rc);
1389
1390         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1391
1392         if (!rc) {
1393                 /* Adjust our own deadline to what we told the client */
1394                 req->rq_deadline = newdl;
1395                 req->rq_early_count++; /* number sent, server side */
1396         } else {
1397                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1398         }
1399
1400         /* Free the (early) reply state from lustre_pack_reply.
1401            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1402         ptlrpc_req_drop_rs(reqcopy);
1403
1404 out_put:
1405         class_export_rpc_dec(reqcopy->rq_export);
1406         class_export_put(reqcopy->rq_export);
1407 out:
1408         sptlrpc_svc_ctx_decref(reqcopy);
1409         OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
1410 out_free:
1411         ptlrpc_request_cache_free(reqcopy);
1412         RETURN(rc);
1413 }
1414
1415 /* Send early replies to everybody expiring within at_early_margin
1416    asking for at_extra time */
1417 static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
1418 {
1419         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1420         struct ptlrpc_request *rq, *n;
1421         struct list_head work_list;
1422         __u32  index, count;
1423         time_t deadline;
1424         time_t now = cfs_time_current_sec();
1425         cfs_duration_t delay;
1426         int first, counter = 0;
1427         ENTRY;
1428
1429         spin_lock(&svcpt->scp_at_lock);
1430         if (svcpt->scp_at_check == 0) {
1431                 spin_unlock(&svcpt->scp_at_lock);
1432                 RETURN(0);
1433         }
1434         delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
1435         svcpt->scp_at_check = 0;
1436
1437         if (array->paa_count == 0) {
1438                 spin_unlock(&svcpt->scp_at_lock);
1439                 RETURN(0);
1440         }
1441
1442         /* The timer went off, but maybe the nearest rpc already completed. */
1443         first = array->paa_deadline - now;
1444         if (first > at_early_margin) {
1445                 /* We've still got plenty of time.  Reset the timer. */
1446                 ptlrpc_at_set_timer(svcpt);
1447                 spin_unlock(&svcpt->scp_at_lock);
1448                 RETURN(0);
1449         }
1450
1451         /* We're close to a timeout, and we don't know how much longer the
1452            server will take. Send early replies to everyone expiring soon. */
1453         INIT_LIST_HEAD(&work_list);
1454         deadline = -1;
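             /* paa_reqs_array is a ring of buckets keyed by the deadline (in
              * seconds) modulo paa_size; start scanning at the bucket holding
              * the earliest deadline */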
1455         index = (unsigned long)array->paa_deadline % array->paa_size;
1456         count = array->paa_count;
1457         while (count > 0) {
1458                 count -= array->paa_reqs_count[index];
1459                 list_for_each_entry_safe(rq, n,
1460                                          &array->paa_reqs_array[index],
1461                                          rq_timed_list) {
1462                         if (rq->rq_deadline > now + at_early_margin) {
1463                                 /* update the earliest deadline */
1464                                 if (deadline == -1 ||
1465                                     rq->rq_deadline < deadline)
1466                                         deadline = rq->rq_deadline;
1467                                 break;
1468                         }
1469
1470                         ptlrpc_at_remove_timed(rq);
1471                         /*
1472                          * ptlrpc_server_drop_request() may already have
1473                          * dropped the refcount to 0; in that case skip
1474                          * the entry instead of adding it to work_list.
1475                          */
1476                         if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
1477                                 list_add(&rq->rq_timed_list, &work_list);
1478                         counter++;
1479                 }
1480
1481                 if (++index >= array->paa_size)
1482                         index = 0;
1483         }
1484         array->paa_deadline = deadline;
1485         /* we have a new earliest deadline, restart the timer */
1486         ptlrpc_at_set_timer(svcpt);
1487
1488         spin_unlock(&svcpt->scp_at_lock);
1489
1490         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1491                "replies\n", first, at_extra, counter);
1492         if (first < 0) {
1493                 /* We're already past request deadlines before we even get a
1494                    chance to send early replies */
1495                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1496                               "request traffic (cpu-bound).\n",
1497                               svcpt->scp_service->srv_name);
1498                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1499                       "delay="CFS_DURATION_T"(jiff)\n",
1500                       counter, svcpt->scp_nreqs_incoming,
1501                       svcpt->scp_nreqs_active,
1502                       at_get(&svcpt->scp_at_estimate), delay);
1503         }
1504
1505         /* we took an additional refcount, so entries can't be deleted from the
1506          * list; no locking is needed */
1507         while (!list_empty(&work_list)) {
1508                 rq = list_entry(work_list.next, struct ptlrpc_request,
1509                                     rq_timed_list);
1510                 list_del_init(&rq->rq_timed_list);
1511
1512                 if (ptlrpc_at_send_early_reply(rq) == 0)
1513                         ptlrpc_at_add_timed(rq);
1514
1515                 ptlrpc_server_drop_request(rq);
1516         }
1517
1518         RETURN(1); /* return "did_something" for liblustre */
1519 }
1520
1521 /* Check if we are already handling an earlier incarnation of this request.
1522  * Called with &req->rq_export->exp_rpc_lock held */
1523 static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
1524 {
1525         struct ptlrpc_request   *tmp = NULL;
1526
1527         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
1528             (atomic_read(&req->rq_export->exp_rpc_count) == 0))
1529                 return 0;
1530
1531         /* bulk requests are aborted upon reconnect; don't try to
1532          * find a match */
1533         if (req->rq_bulk_write || req->rq_bulk_read)
1534                 return 0;
1535
1536         /* This list should not be longer than max_requests in
1537          * flight on the client, so it is not all that long.
1538          * Also, we only hit this codepath for resent requests,
1539          * which makes it even more rarely traversed */
1540         list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
1541                                 rq_exp_list) {
1542                 /* Found duplicate one */
1543                 if (tmp->rq_xid == req->rq_xid)
1544                         goto found;
1545         }
1546         list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
1547                                 rq_exp_list) {
1548                 /* Found duplicate one */
1549                 if (tmp->rq_xid == req->rq_xid)
1550                         goto found;
1551         }
1552         return 0;
1553
1554 found:
1555         DEBUG_REQ(D_HA, req, "Found duplicate req in processing");
1556         DEBUG_REQ(D_HA, tmp, "Request being processed");
1557         return -EBUSY;
1558 }
1559
1560 /**
1561  * Check if a request should be assigned high priority.
1562  *
1563  * \retval      < 0: error occurred
1564  *                0: normal RPC request
1565  *               +1: high priority request
1566  */
1567 static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
1568                                     struct ptlrpc_request *req)
1569 {
1570         int rc = 0; /* normal priority by default; rc was previously used uninitialized when no handler or check callback was set */
1571         ENTRY;
1572
1573         if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) {
1574                 rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
1575                 if (rc < 0)
1576                         RETURN(rc);
1577
1578                 LASSERT(rc == 0);
1579         }
1580
1581         if (req->rq_export != NULL && req->rq_ops != NULL) {
1582                 /* Perform the request-specific check. It must be done
1583                  * before the request is added to the exp_hp_rpcs list,
1584                  * otherwise it may hit the swab race of LU-1044. */
1585                 if (req->rq_ops->hpreq_check != NULL) {
1586                         rc = req->rq_ops->hpreq_check(req);
1587                         LASSERT(rc <= 1); /* can only return error, 0, or 1 */
1588                 }
1589         }
1590
1591         RETURN(rc);
1592 }
1593
1594 /** Remove the request from the export list. */
1595 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
1596 {
1597         ENTRY;
1598         if (req->rq_export) {
1599                 /* refresh the lock timeout again so that the client has
1600                  * more room to send a lock cancel RPC. */
1601                 if (req->rq_ops && req->rq_ops->hpreq_fini)
1602                         req->rq_ops->hpreq_fini(req);
1603
1604                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1605                 list_del_init(&req->rq_exp_list);
1606                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1607         }
1608         EXIT;
1609 }
1610
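     /* Default hpreq_check callback: unconditionally report the request as
      * high priority (return 1). */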
1611 static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
1612 {
1613         return 1;
1614 }
1615
1616 static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = {
1617         .hpreq_check       = ptlrpc_hpreq_check,
1618 };
1619
1620 /* Hi-Priority RPC check by RPC operation code. */
1621 int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
1622 {
1623         int opc = lustre_msg_get_opc(req->rq_reqmsg);
1624
1625         /* Check the export so that only reconnects from a not yet
1626          * evicted export may become HP RPCs. */
1627         if ((req->rq_export != NULL) &&
1628             (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT))
1629                 req->rq_ops = &ptlrpc_hpreq_common;
1630
1631         return 0;
1632 }
1633 EXPORT_SYMBOL(ptlrpc_hpreq_handler);
1634
1635 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
1636                                      struct ptlrpc_request *req)
1637 {
1638         int rc;
1639         bool hp;
1640         ENTRY;
1641
1642         rc = ptlrpc_server_hpreq_init(svcpt, req);
1643         if (rc < 0)
1644                 RETURN(rc);
1645
1646         hp = rc > 0;
1647         ptlrpc_nrs_req_initialize(svcpt, req, hp);
1648
1649         if (req->rq_export != NULL) {
1650                 struct obd_export *exp = req->rq_export;
1651
1652                 /* search for a duplicate xid and add to the list
1653                  * atomically */
1654                 spin_lock_bh(&exp->exp_rpc_lock);
1655                 rc = ptlrpc_server_check_resend_in_progress(req);
1656                 if (rc < 0) {
1657                         spin_unlock_bh(&exp->exp_rpc_lock);
1658
1659                         ptlrpc_nrs_req_finalize(req);
1660                         RETURN(rc);
1661                 }
1662
1663                 if (hp || req->rq_ops != NULL)
1664                         list_add(&req->rq_exp_list, &exp->exp_hp_rpcs);
1665                 else
1666                         list_add(&req->rq_exp_list, &exp->exp_reg_rpcs);
1667                 spin_unlock_bh(&exp->exp_rpc_lock);
1668         }
1669
1670         /* From this point on, the current thread is not necessarily the
1671          * processing thread for this request; it is queued and can be
1672          * found via the export lists. Remove all ties to the old thread. */
1673         req->rq_svc_thread->t_env->le_ses = NULL;
1674         req->rq_svc_thread = NULL;
1675         req->rq_session.lc_thread = NULL;
1676
1677         ptlrpc_nrs_req_add(svcpt, req, hp);
1678
1679         RETURN(0);
1680 }
1681
1682 /**
1683  * Check whether a high priority request may be handled.
1684  * Callers may invoke this without any lock, but must hold
1685  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1686  */
1687 static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
1688                                      bool force)
1689 {
1690         int running = svcpt->scp_nthrs_running;
1691
1692         if (!nrs_svcpt_has_hp(svcpt))
1693                 return false;
1694
1695         if (force)
1696                 return true;
1697
1698         if (ptlrpc_nrs_req_throttling_nolock(svcpt, true))
1699                 return false;
1700
1701         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1702                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1703                 /* leave just 1 thread for normal RPCs */
1704                 running = PTLRPC_NTHRS_INIT;
1705                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1706                         running += 1;
1707         }
1708
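             /* don't let high-priority requests occupy every running thread;
              * presumably this keeps at least one thread free for other work
              * such as incoming request preprocessing */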
1709         if (svcpt->scp_nreqs_active >= running - 1)
1710                 return false;
1711
1712         if (svcpt->scp_nhreqs_active == 0)
1713                 return true;
1714
1715         return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
1716                svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
1717 }
1718
1719 static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
1720                                        bool force)
1721 {
1722         return ptlrpc_server_allow_high(svcpt, force) &&
1723                ptlrpc_nrs_req_pending_nolock(svcpt, true);
1724 }
1725
1726 /**
1727  * Only allow normal priority requests on a service that has a high-priority
1728  * queue if forced (i.e. cleanup), if there are other high priority requests
1729  * already being processed (i.e. those threads can service more high-priority
1730  * requests), or if there are enough idle threads that a later thread can do
1731  * a high priority request.
1732  * Callers may invoke this without any lock, but must hold
1733  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1734  */
1735 static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
1736                                        bool force)
1737 {
1738         int running = svcpt->scp_nthrs_running;
1739         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1740                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1741                 /* leave just 1 thread for normal RPCs */
1742                 running = PTLRPC_NTHRS_INIT;
1743                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1744                         running += 1;
1745         }
1746
1747         if (force)
1748                 return true;
1749
1750         if (ptlrpc_nrs_req_throttling_nolock(svcpt, false))
1751                 return false;
1752
1753         if (svcpt->scp_nreqs_active < running - 2)
1754                 return true;
1755
1756         if (svcpt->scp_nreqs_active >= running - 1)
1757                 return false;
1758
1759         return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
1760 }
1761
1762 static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
1763                                          bool force)
1764 {
1765         return ptlrpc_server_allow_normal(svcpt, force) &&
1766                ptlrpc_nrs_req_pending_nolock(svcpt, false);
1767 }
1768
1769 /**
1770  * Returns true if there are requests available in the incoming
1771  * request queue for processing and we are allowed to fetch them.
1772  * Callers may invoke this without any lock, but must hold
1773  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1774  * \see ptlrpc_server_allow_normal
1775  * \see ptlrpc_server_allow_high
1776  */
1777 static inline bool
1778 ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
1779 {
1780         return ptlrpc_server_high_pending(svcpt, force) ||
1781                ptlrpc_server_normal_pending(svcpt, force);
1782 }
1783
1784 /**
1785  * Fetch a request for processing from the queue of unprocessed requests.
1786  * Favors high-priority requests.
1787  * Returns a pointer to the fetched request, or NULL if none is pending.
1788  */
1789 static struct ptlrpc_request *
1790 ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
1791 {
1792         struct ptlrpc_request *req = NULL;
1793         ENTRY;
1794
1795         spin_lock(&svcpt->scp_req_lock);
1796
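             /* scp_hreq_count tracks how many high-priority requests have
              * been served in a row; it is reset when a normal request is
              * picked, so srv_hpreq_ratio can bound the preference given to
              * the HP queue (see ptlrpc_server_allow_high()) */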
1797         if (ptlrpc_server_high_pending(svcpt, force)) {
1798                 req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
1799                 if (req != NULL) {
1800                         svcpt->scp_hreq_count++;
1801                         goto got_request;
1802                 }
1803         }
1804
1805         if (ptlrpc_server_normal_pending(svcpt, force)) {
1806                 req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
1807                 if (req != NULL) {
1808                         svcpt->scp_hreq_count = 0;
1809                         goto got_request;
1810                 }
1811         }
1812
1813         spin_unlock(&svcpt->scp_req_lock);
1814         RETURN(NULL);
1815
1816 got_request:
1817         svcpt->scp_nreqs_active++;
1818         if (req->rq_hp)
1819                 svcpt->scp_nhreqs_active++;
1820
1821         spin_unlock(&svcpt->scp_req_lock);
1822
1823         if (likely(req->rq_export))
1824                 class_export_rpc_inc(req->rq_export);
1825
1826         RETURN(req);
1827 }
1828
1829 /**
1830  * Handle freshly incoming reqs, add to timed early reply list,
1831  * pass on to regular request queue.
1832  * All incoming requests pass through here before getting into
1833  * ptlrpc_server_handle_request later on.
1834  */
1835 static int
1836 ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
1837                             struct ptlrpc_thread *thread)
1838 {
1839         struct ptlrpc_service   *svc = svcpt->scp_service;
1840         struct ptlrpc_request   *req;
1841         __u32                   deadline;
1842         int                     rc;
1843         ENTRY;
1844
1845         spin_lock(&svcpt->scp_lock);
1846         if (list_empty(&svcpt->scp_req_incoming)) {
1847                 spin_unlock(&svcpt->scp_lock);
1848                 RETURN(0);
1849         }
1850
1851         req = list_entry(svcpt->scp_req_incoming.next,
1852                              struct ptlrpc_request, rq_list);
1853         list_del_init(&req->rq_list);
1854         svcpt->scp_nreqs_incoming--;
1855         /* Consider this still a "queued" request as far as stats are
1856          * concerned */
1857         spin_unlock(&svcpt->scp_lock);
1858
1859         /* go through security check/transform */
1860         rc = sptlrpc_svc_unwrap_request(req);
1861         switch (rc) {
1862         case SECSVC_OK:
1863                 break;
1864         case SECSVC_COMPLETE:
1865                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1866                 goto err_req;
1867         case SECSVC_DROP:
1868                 goto err_req;
1869         default:
1870                 LBUG();
1871         }
1872
1873         /*
1874          * for null-flavored rpc, msg has been unpacked by sptlrpc, although
1875          * redoing it wouldn't be harmful.
1876          */
1877         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1878                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1879                 if (rc != 0) {
1880                         CERROR("error unpacking request: ptl %d from %s "
1881                                "x"LPU64"\n", svc->srv_req_portal,
1882                                libcfs_id2str(req->rq_peer), req->rq_xid);
1883                         goto err_req;
1884                 }
1885         }
1886
1887         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1888         if (rc) {
1889                 CERROR("error unpacking ptlrpc body: ptl %d from %s x"
1890                         LPU64"\n", svc->srv_req_portal,
1891                         libcfs_id2str(req->rq_peer), req->rq_xid);
1892                 goto err_req;
1893         }
1894
1895         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1896             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
1897                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1898                        cfs_fail_val, req->rq_xid);
1899                 goto err_req;
1900         }
1901
1902         rc = -EINVAL;
1903         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1904                 CERROR("wrong packet type received (type=%u) from %s\n",
1905                        lustre_msg_get_type(req->rq_reqmsg),
1906                        libcfs_id2str(req->rq_peer));
1907                 goto err_req;
1908         }
1909
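             /* tag bulk I/O requests; among other things, the resend check in
              * ptlrpc_server_check_resend_in_progress() skips them because
              * bulk requests are aborted upon reconnect */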
1910         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1911         case MDS_WRITEPAGE:
1912         case OST_WRITE:
1913         case OUT_UPDATE:
1914                 req->rq_bulk_write = 1;
1915                 break;
1916         case MDS_READPAGE:
1917         case OST_READ:
1918         case MGS_CONFIG_READ:
1919                 req->rq_bulk_read = 1;
1920                 break;
1921         }
1922
1923         CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
1924
1925         req->rq_export = class_conn2export(
1926                 lustre_msg_get_handle(req->rq_reqmsg));
1927         if (req->rq_export) {
1928                 rc = ptlrpc_check_req(req);
1929                 if (rc == 0) {
1930                         rc = sptlrpc_target_export_check(req->rq_export, req);
1931                         if (rc)
1932                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1933                                           "illegal security flavor,");
1934                 }
1935
1936                 if (rc)
1937                         goto err_req;
1938                 ptlrpc_update_export_timer(req->rq_export, 0);
1939         }
1940
1941         /* req_in handling should/must be fast */
1942         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1943                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1944                           cfs_time_sub(cfs_time_current_sec(),
1945                                        req->rq_arrival_time.tv_sec));
1946
1947         /* Set rpc server deadline and add it to the timed list */
1948         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1949                     MSGHDR_AT_SUPPORT) ?
1950                    /* The max time the client expects us to take */
1951                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1952
1953         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1954         if (unlikely(deadline == 0)) {
1955                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1956                 goto err_req;
1957         }
1958
1959         /* Skip early reply */
1960         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_RESEND))
1961                 req->rq_deadline += obd_timeout;
1962
1963         req->rq_svc_thread = thread;
1964         if (thread != NULL) {
1965                 /* initialize the request session; it is needed for
1966                  * request processing by the target */
1967                 rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
1968                                                        LCT_NOREF);
1969                 if (rc) {
1970                         CERROR("%s: failure to initialize session: rc = %d\n",
1971                                thread->t_name, rc);
1972                         goto err_req;
1973                 }
1974                 req->rq_session.lc_thread = thread;
1975                 lu_context_enter(&req->rq_session);
1976                 thread->t_env->le_ses = &req->rq_session;
1977         }
1978
1979         ptlrpc_at_add_timed(req);
1980
1981         /* Move it over to the request processing queue */
1982         rc = ptlrpc_server_request_add(svcpt, req);
1983         if (rc)
1984                 GOTO(err_req, rc);
1985
1986         wake_up(&svcpt->scp_waitq);
1987         RETURN(1);
1988
1989 err_req:
1990         ptlrpc_server_finish_request(svcpt, req);
1991
1992         RETURN(1);
1993 }
1994
1995 /**
1996  * Main incoming request handling logic.
1997  * Calls handler function from service to do actual processing.
1998  */
1999 static int
2000 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
2001                              struct ptlrpc_thread *thread)
2002 {
2003         struct ptlrpc_service   *svc = svcpt->scp_service;
2004         struct ptlrpc_request   *request;
2005         struct timeval           work_start;
2006         struct timeval           work_end;
2007         long                     timediff;
2008         int                      fail_opc = 0;
2009
2010         ENTRY;
2011
2012         request = ptlrpc_server_request_get(svcpt, false);
2013         if (request == NULL)
2014                 RETURN(0);
2015
2016         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
2017                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
2018         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
2019                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
2020
2021         if (unlikely(fail_opc)) {
2022                 if (request->rq_export && request->rq_ops)
2023                         OBD_FAIL_TIMEOUT(fail_opc, 4);
2024         }
2025
2026         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
2027
2028         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
2029                 libcfs_debug_dumplog();
2030
2031         do_gettimeofday(&work_start);
2032         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
2033         if (likely(svc->srv_stats != NULL)) {
2034                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
2035                                     timediff);
2036                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
2037                                     svcpt->scp_nreqs_incoming);
2038                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
2039                                     svcpt->scp_nreqs_active);
2040                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
2041                                     at_get(&svcpt->scp_at_estimate));
2042         }
2043
2044         if (likely(request->rq_export)) {
2045                 if (unlikely(ptlrpc_check_req(request)))
2046                         goto put_conn;
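                     /* timediff is in usec; ">> 19" divides by 524288, a cheap
                      * power-of-two approximation of converting to seconds
                      * (overestimating by roughly 2x) for the extra-delay hint */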
2047                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
2048         }
2049
2050         /* Discard requests queued for longer than the deadline.
2051            The deadline is increased if we send an early reply. */
2052         if (cfs_time_current_sec() > request->rq_deadline) {
2053                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
2054                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
2055                           libcfs_id2str(request->rq_peer),
2056                           cfs_time_sub(request->rq_deadline,
2057                           request->rq_arrival_time.tv_sec),
2058                           cfs_time_sub(cfs_time_current_sec(),
2059                           request->rq_deadline));
2060                 goto put_conn;
2061         }
2062
2063         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
2064                "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
2065                (request->rq_export ?
2066                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2067                (request->rq_export ?
2068                 atomic_read(&request->rq_export->exp_refcount) : -99),
2069                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
2070                libcfs_id2str(request->rq_peer),
2071                lustre_msg_get_opc(request->rq_reqmsg));
2072
2073         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
2074                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
2075
2076         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
2077
2078         /* re-assign the request and session thread to the current one */
2079         request->rq_svc_thread = thread;
2080         if (thread != NULL) {
2081                 LASSERT(request->rq_session.lc_thread == NULL);
2082                 request->rq_session.lc_thread = thread;
2083                 thread->t_env->le_ses = &request->rq_session;
2084         }
2085         svc->srv_ops.so_req_handler(request);
2086
2087         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
2088
2089 put_conn:
2090         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
2091                 DEBUG_REQ(D_WARNING, request, "Request took longer "
2092                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
2093                           " client may timeout.",
2094                           cfs_time_sub(request->rq_deadline,
2095                                        request->rq_arrival_time.tv_sec),
2096                           cfs_time_sub(cfs_time_current_sec(),
2097                                        request->rq_deadline));
2098         }
2099
2100         do_gettimeofday(&work_end);
2101         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
2102         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
2103                "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
2104                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
2105                 current_comm(),
2106                 (request->rq_export ?
2107                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
2108                 (request->rq_export ?
2109                  atomic_read(&request->rq_export->exp_refcount) : -99),
2110                 lustre_msg_get_status(request->rq_reqmsg),
2111                 request->rq_xid,
2112                 libcfs_id2str(request->rq_peer),
2113                 lustre_msg_get_opc(request->rq_reqmsg),
2114                 timediff,
2115                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
2116                 (request->rq_repmsg ?
2117                  lustre_msg_get_transno(request->rq_repmsg) :
2118                  request->rq_transno),
2119                 request->rq_status,
2120                 (request->rq_repmsg ?
2121                  lustre_msg_get_status(request->rq_repmsg) : -999));
2122         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
2123                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
2124                 int opc = opcode_offset(op);
2125                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
2126                         LASSERT(opc < LUSTRE_MAX_OPCODES);
2127                         lprocfs_counter_add(svc->srv_stats,
2128                                             opc + EXTRA_MAX_OPCODES,
2129                                             timediff);
2130                 }
2131         }
2132         if (unlikely(request->rq_early_count)) {
2133                 DEBUG_REQ(D_ADAPTTO, request,
2134                           "sent %d early replies before finishing in "
2135                           CFS_DURATION_T"s",
2136                           request->rq_early_count,
2137                           cfs_time_sub(work_end.tv_sec,
2138                           request->rq_arrival_time.tv_sec));
2139         }
2140
2141         ptlrpc_server_finish_active_request(svcpt, request);
2142
2143         RETURN(1);
2144 }
2145
2146 /**
2147  * An internal function to process a single reply state object.
2148  */
2149 static int
2150 ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
2151 {
2152         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
2153         struct ptlrpc_service     *svc = svcpt->scp_service;
2154         struct obd_export         *exp;
2155         int                        nlocks;
2156         int                        been_handled;
2157         ENTRY;
2158
2159         exp = rs->rs_export;
2160
2161         LASSERT(rs->rs_difficult);
2162         LASSERT(rs->rs_scheduled);
2163         LASSERT(list_empty(&rs->rs_list));
2164
2165         spin_lock(&exp->exp_lock);
2166         /* Noop if removed already */
2167         list_del_init(&rs->rs_exp_list);
2168         spin_unlock(&exp->exp_lock);
2169
2170         /* The disk commit callback holds exp_uncommitted_replies_lock while it
2171          * iterates over newly committed replies, removing them from
2172          * exp_uncommitted_replies.  It then drops this lock and schedules the
2173          * replies it found for handling here.
2174          *
2175          * We can avoid contention for exp_uncommitted_replies_lock between the
2176          * HRT threads and further commit callbacks by checking rs_committed
2177          * which is set in the commit callback while it holds both
2178          * rs_lock and exp_uncommitted_replies_lock.
2179          *
2180          * If we see rs_committed clear, the commit callback _may_ not have
2181          * handled this reply yet and we race with it to grab
2182          * exp_uncommitted_replies_lock before removing the reply from
2183          * exp_uncommitted_replies.  Note that if we lose the race and the
2184          * reply has already been removed, list_del_init() is a noop.
2185          *
2186          * If we see rs_committed set, we know the commit callback is
2187          * handling, or has handled, this reply, since store reordering
2188          * might allow us to see rs_committed set out of sequence.  But
2189          * since it is set while holding rs_lock, we can be sure it has
2190          * all completed once we hold rs_lock, which we do right next.
2191          */
2192         if (!rs->rs_committed) {
2193                 spin_lock(&exp->exp_uncommitted_replies_lock);
2194                 list_del_init(&rs->rs_obd_list);
2195                 spin_unlock(&exp->exp_uncommitted_replies_lock);
2196         }
2197
2198         spin_lock(&rs->rs_lock);
2199
2200         been_handled = rs->rs_handled;
2201         rs->rs_handled = 1;
2202
2203         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
2204         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
2205
2206         if (nlocks == 0 && !been_handled) {
2207                 /* If we see this, we should already have seen the warning
2208                  * in mds_steal_ack_locks()  */
2209                 CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
2210                        " o%d NID %s\n",
2211                        rs,
2212                        rs->rs_xid, rs->rs_transno, rs->rs_opc,
2213                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
2214         }
2215
2216         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
2217                 spin_unlock(&rs->rs_lock);
2218
2219                 if (!been_handled && rs->rs_on_net) {
2220                         LNetMDUnlink(rs->rs_md_h);
2221                         /* Ignore return code; we're racing with completion */
2222                 }
2223
2224                 while (nlocks-- > 0)
2225                         ldlm_lock_decref(&rs->rs_locks[nlocks],
2226                                          rs->rs_modes[nlocks]);
2227
2228                 spin_lock(&rs->rs_lock);
2229         }
2230
2231         rs->rs_scheduled = 0;
2232
2233         if (!rs->rs_on_net) {
2234                 /* Off the net */
2235                 spin_unlock(&rs->rs_lock);
2236
2237                 class_export_put(exp);
2238                 rs->rs_export = NULL;
2239                 ptlrpc_rs_decref(rs);
2240                 if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
2241                     svc->srv_is_stopping)
2242                         wake_up_all(&svcpt->scp_waitq);
2243                 RETURN(1);
2244         }
2245
2246         /* still on the net; callback will schedule */
2247         spin_unlock(&rs->rs_lock);
2248         RETURN(1);
2249 }
2250
2251
2252 static void
2253 ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
2254 {
2255         int avail = svcpt->scp_nrqbds_posted;
2256         int low_water = test_req_buffer_pressure ? 0 :
2257                         svcpt->scp_service->srv_nbuf_per_group / 2;
2258
2259         /* NB I'm not locking; just looking. */
2260
2261         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
2262          * allowed the request history to grow out of control.  We could put a
2263          * sanity check on that here and cull some history if we need the
2264          * space. */
2265
2266         if (avail <= low_water)
2267                 ptlrpc_grow_req_bufs(svcpt, 1);
2268
2269         if (svcpt->scp_service->srv_stats) {
2270                 lprocfs_counter_add(svcpt->scp_service->srv_stats,
2271                                     PTLRPC_REQBUF_AVAIL_CNTR, avail);
2272         }
2273 }
2274
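     /* l_wait_event() timeout callback (see ptlrpc_wait_event()): clear the
      * rqbd back-off and return -ETIMEDOUT so the service thread retries
      * posting request buffers */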
2275 static int
2276 ptlrpc_retry_rqbds(void *arg)
2277 {
2278         struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
2279
2280         svcpt->scp_rqbd_timeout = 0;
2281         return -ETIMEDOUT;
2282 }
2283
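     /* Check whether the running threads can cope with the load: one thread
      * is held in reserve, plus one more when the service has a
      * high-priority request handler. */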
2284 static inline int
2285 ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
2286 {
2287         return svcpt->scp_nreqs_active <
2288                svcpt->scp_nthrs_running - 1 -
2289                (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
2290 }
2291
2292 /**
2293  * Check whether we are allowed to create more threads.
2294  * Callers may invoke this without any lock, but must hold
2295  * ptlrpc_service_part::scp_lock to get a reliable result.
2296  */
2297 static inline int
2298 ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
2299 {
2300         return svcpt->scp_nthrs_running +
2301                svcpt->scp_nthrs_starting <
2302                svcpt->scp_service->srv_nthrs_cpt_limit;
2303 }
2304
2305 /**
2306  * Check whether there are too many requests and we may create more threads.
2307  */
2308 static inline int
2309 ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
2310 {
2311         return !ptlrpc_threads_enough(svcpt) &&
2312                 ptlrpc_threads_increasable(svcpt);
2313 }
2314
2315 static inline int
2316 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2317 {
2318         return thread_is_stopping(thread) ||
2319                thread->t_svcpt->scp_service->srv_is_stopping;
2320 }
2321
2322 static inline int
2323 ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
2324 {
2325         return !list_empty(&svcpt->scp_rqbd_idle) &&
2326                svcpt->scp_rqbd_timeout == 0;
2327 }
2328
2329 static inline int
2330 ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
2331 {
2332         return svcpt->scp_at_check;
2333 }
2334
2335 /**
2336  * Check whether requests are waiting for preprocessing.
2337  * Callers may invoke this without any lock, but must hold
2338  * ptlrpc_service_part::scp_lock to get a reliable result.
2339  */
2340 static inline int
2341 ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
2342 {
2343         return !list_empty(&svcpt->scp_req_incoming);
2344 }
2345
2346 static __attribute__((__noinline__)) int
2347 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
2348                   struct ptlrpc_thread *thread)
2349 {
2350         /* Don't exit while there are replies to be handled */
2351         struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
2352                                              ptlrpc_retry_rqbds, svcpt);
2353
2354         lc_watchdog_disable(thread->t_watchdog);
2355
2356         cond_resched();
2357
2358         l_wait_event_exclusive_head(svcpt->scp_waitq,
2359                                 ptlrpc_thread_stopping(thread) ||
2360                                 ptlrpc_server_request_incoming(svcpt) ||
2361                                 ptlrpc_server_request_pending(svcpt, false) ||
2362                                 ptlrpc_rqbd_pending(svcpt) ||
2363                                 ptlrpc_at_check(svcpt), &lwi);
2364
2365         if (ptlrpc_thread_stopping(thread))
2366                 return -EINTR;
2367
2368         lc_watchdog_touch(thread->t_watchdog,
2369                           ptlrpc_server_get_timeout(svcpt));
2370         return 0;
2371 }
2372
2373 /**
2374  * Main thread body for service threads.
2375  * Waits in a loop for new requests to process.
2376  * Every time an incoming request is added to the queue, the waitq
2377  * is woken up and one of the threads will handle it.
2378  */
2379 static int ptlrpc_main(void *arg)
2380 {
2381         struct ptlrpc_thread            *thread = (struct ptlrpc_thread *)arg;
2382         struct ptlrpc_service_part      *svcpt = thread->t_svcpt;
2383         struct ptlrpc_service           *svc = svcpt->scp_service;
2384         struct ptlrpc_reply_state       *rs;
2385         struct group_info *ginfo = NULL;
2386         struct lu_env *env;
2387         int counter = 0, rc = 0;
2388         ENTRY;
2389
2390         thread->t_pid = current_pid();
2391         unshare_fs_struct();
2392
2393         /* NB: we will call cfs_cpt_bind() for all threads, because we
2394          * might want to run the Lustre server only on a subset of system
2395          * CPUs; in that case ->scp_cpt is CFS_CPT_ANY */
2396         rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
2397         if (rc != 0) {
2398                 CWARN("%s: failed to bind %s on CPT %d\n",
2399                       svc->srv_name, thread->t_name, svcpt->scp_cpt);
2400         }
2401
2402         ginfo = groups_alloc(0);
2403         if (!ginfo) {
2404                 rc = -ENOMEM;
2405                 goto out;
2406         }
2407
2408         set_current_groups(ginfo);
2409         put_group_info(ginfo);
2410
2411         if (svc->srv_ops.so_thr_init != NULL) {
2412                 rc = svc->srv_ops.so_thr_init(thread);
2413                 if (rc)
2414                         goto out;
2415         }
2416
2417         OBD_ALLOC_PTR(env);
2418         if (env == NULL) {
2419                 rc = -ENOMEM;
2420                 goto out_srv_fini;
2421         }
2422
2423         rc = lu_context_init(&env->le_ctx,
2424                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2425         if (rc)
2426                 goto out_srv_fini;
2427
2428         thread->t_env = env;
2429         env->le_ctx.lc_thread = thread;
2430         env->le_ctx.lc_cookie = 0x6;
2431
2432         while (!list_empty(&svcpt->scp_rqbd_idle)) {
2433                 rc = ptlrpc_server_post_idle_rqbds(svcpt);
2434                 if (rc >= 0)
2435                         continue;
2436
2437                 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2438                         svc->srv_name, svcpt->scp_cpt, rc);
2439                 goto out_srv_fini;
2440         }
2441
2442         /* Alloc reply state structure for this one */
2443         OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
2444         if (!rs) {
2445                 rc = -ENOMEM;
2446                 goto out_srv_fini;
2447         }
2448
2449         spin_lock(&svcpt->scp_lock);
2450
2451         LASSERT(thread_is_starting(thread));
2452         thread_clear_flags(thread, SVC_STARTING);
2453
2454         LASSERT(svcpt->scp_nthrs_starting == 1);
2455         svcpt->scp_nthrs_starting--;
2456
2457         /* SVC_STOPPING may already be set here if someone else is trying
2458          * to stop the service while this new thread has been dynamically
2459          * forked. We still set SVC_RUNNING to let our creator know that
2460          * we are now running; however, we will exit as soon as possible */
2461         thread_add_flags(thread, SVC_RUNNING);
2462         svcpt->scp_nthrs_running++;
2463         spin_unlock(&svcpt->scp_lock);
2464
2465         /* wake up our creator in case it's still waiting. */
2466         wake_up(&thread->t_ctl_waitq);
2467
2468         thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2469                                              NULL, NULL);
2470
2471         spin_lock(&svcpt->scp_rep_lock);
2472         list_add(&rs->rs_list, &svcpt->scp_rep_idle);
2473         wake_up(&svcpt->scp_rep_waitq);
2474         spin_unlock(&svcpt->scp_rep_lock);
2475
2476         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2477                svcpt->scp_nthrs_running);
2478
2479         /* XXX maintain a list of all managed devices: insert here */
2480         while (!ptlrpc_thread_stopping(thread)) {
2481                 if (ptlrpc_wait_event(svcpt, thread))
2482                         break;
2483
2484                 ptlrpc_check_rqbd_pool(svcpt);
2485
2486                 if (ptlrpc_threads_need_create(svcpt)) {
2487                         /* Ignore return code - we tried... */
2488                         ptlrpc_start_thread(svcpt, 0);
2489                 }
2490
2491                 /* reset le_ses to initial state */
2492                 env->le_ses = NULL;
2493                 /* Process all incoming reqs before handling any */
2494                 if (ptlrpc_server_request_incoming(svcpt)) {
2495                         lu_context_enter(&env->le_ctx);
2496                         ptlrpc_server_handle_req_in(svcpt, thread);
2497                         lu_context_exit(&env->le_ctx);
2498
2499                         /* but limit ourselves in case of flood */
2500                         if (counter++ < 100)
2501                                 continue;
2502                         counter = 0;
2503                 }
2504
2505                 if (ptlrpc_at_check(svcpt))
2506                         ptlrpc_at_check_timed(svcpt);
2507
2508                 if (ptlrpc_server_request_pending(svcpt, false)) {
2509                         lu_context_enter(&env->le_ctx);
2510                         ptlrpc_server_handle_request(svcpt, thread);
2511                         lu_context_exit(&env->le_ctx);
2512                 }
2513
2514                 if (ptlrpc_rqbd_pending(svcpt) &&
2515                     ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
2516                         /* I just failed to repost request buffers.
2517                          * Wait for a timeout (unless something else
2518                          * happens) before I try again */
2519                         svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
2520                         CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
2521                                svcpt->scp_nrqbds_posted);
2522                 }
2523         }
2524
2525         lc_watchdog_delete(thread->t_watchdog);
2526         thread->t_watchdog = NULL;
2527
2528 out_srv_fini:
2529         /*
2530          * deconstruct service specific state created by ptlrpc_start_thread()
2531          */
2532         if (svc->srv_ops.so_thr_done != NULL)
2533                 svc->srv_ops.so_thr_done(thread);
2534
2535         if (env != NULL) {
2536                 lu_context_fini(&env->le_ctx);
2537                 OBD_FREE_PTR(env);
2538         }
2539 out:
2540         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2541                thread, thread->t_pid, thread->t_id, rc);
2542
2543         spin_lock(&svcpt->scp_lock);
2544         if (thread_test_and_clear_flags(thread, SVC_STARTING))
2545                 svcpt->scp_nthrs_starting--;
2546
2547         if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
2548                 /* must know immediately */
2549                 svcpt->scp_nthrs_running--;
2550         }
2551
2552         thread->t_id = rc;
2553         thread_add_flags(thread, SVC_STOPPED);
2554
2555         wake_up(&thread->t_ctl_waitq);
2556         spin_unlock(&svcpt->scp_lock);
2557
2558         return rc;
2559 }
2560
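     /* Splice everything queued on this reply-handling thread onto \a replies;
      * returns nonzero when the thread should wake up, i.e. there are replies
      * to handle or ptlrpc_hr is stopping. */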
2561 static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
2562                           struct list_head *replies)
2563 {
2564         int result;
2565
2566         spin_lock(&hrt->hrt_lock);
2567
2568         list_splice_init(&hrt->hrt_queue, replies);
2569         result = ptlrpc_hr.hr_stopping || !list_empty(replies);
2570
2571         spin_unlock(&hrt->hrt_lock);
2572         return result;
2573 }
2574
2575 /**
2576  * Main body of "handle reply" function.
2577  * It processes acked reply states
2578  */
2579 static int ptlrpc_hr_main(void *arg)
2580 {
2581         struct ptlrpc_hr_thread         *hrt = (struct ptlrpc_hr_thread *)arg;
2582         struct ptlrpc_hr_partition      *hrp = hrt->hrt_partition;
2583         struct list_head                replies;
2584         char                            threadname[20];
2585         int                             rc;
2586
2587         INIT_LIST_HEAD(&replies);
2588         snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
2589                  hrp->hrp_cpt, hrt->hrt_id);
2590         unshare_fs_struct();
2591
2592         rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
2593         if (rc != 0) {
2594                 CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
2595                       threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
2596         }
2597
2598         atomic_inc(&hrp->hrp_nstarted);
2599         wake_up(&ptlrpc_hr.hr_waitq);
2600
2601         while (!ptlrpc_hr.hr_stopping) {
2602                 l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
2603
2604                 while (!list_empty(&replies)) {
2605                         struct ptlrpc_reply_state *rs;
2606
2607                         rs = list_entry(replies.prev,
2608                                         struct ptlrpc_reply_state,
2609                                         rs_list);
2610                         list_del_init(&rs->rs_list);
2611                         ptlrpc_handle_rs(rs);
2612                 }
2613         }
2614
2615         atomic_inc(&hrp->hrp_nstopped);
2616         wake_up(&ptlrpc_hr.hr_waitq);
2617
2618         return 0;
2619 }
2620
2621 static void ptlrpc_stop_hr_threads(void)
2622 {
2623         struct ptlrpc_hr_partition      *hrp;
2624         int                             i;
2625         int                             j;
2626
2627         ptlrpc_hr.hr_stopping = 1;
2628
2629         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2630                 if (hrp->hrp_thrs == NULL)
2631                         continue; /* uninitialized */
2632                 for (j = 0; j < hrp->hrp_nthrs; j++)
2633                         wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
2634         }
2635
2636         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2637                 if (hrp->hrp_thrs == NULL)
2638                         continue; /* uninitialized */
2639                 wait_event(ptlrpc_hr.hr_waitq,
2640                                atomic_read(&hrp->hrp_nstopped) ==
2641                                atomic_read(&hrp->hrp_nstarted));
2642         }
2643 }
2644
2645 static int ptlrpc_start_hr_threads(void)
2646 {
2647         struct ptlrpc_hr_partition      *hrp;
2648         int                             i;
2649         int                             j;
2650         ENTRY;
2651
2652         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2653                 int     rc = 0;
2654
2655                 for (j = 0; j < hrp->hrp_nthrs; j++) {
2656                         struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
2657                         struct task_struct *task;
2658
2659                         task = kthread_run(ptlrpc_hr_main,
2660                                            &hrp->hrp_thrs[j],
2661                                            "ptlrpc_hr%02d_%03d",
2662                                            hrp->hrp_cpt,
2663                                            hrt->hrt_id);
2664                         if (IS_ERR(task)) {
2665                                 rc = PTR_ERR(task);
2666                                 break;
2667                         }
2668                 }
2669
2670                 wait_event(ptlrpc_hr.hr_waitq,
2671                            atomic_read(&hrp->hrp_nstarted) == j);
2672
2673                 if (rc < 0) {
2674                         CERROR("cannot start reply handler thread %d:%d: "
2675                                "rc = %d\n", i, j, rc);
2676                         ptlrpc_stop_hr_threads();
2677                         RETURN(rc);
2678                 }
2679         }
2680
2681         RETURN(0);
2682 }
2683
2684 static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
2685 {
2686         struct l_wait_info      lwi = { 0 };
2687         struct ptlrpc_thread    *thread;
2688         struct list_head        zombie;
2689
2690         ENTRY;
2691
2692         CDEBUG(D_INFO, "Stopping threads for service %s\n",
2693                svcpt->scp_service->srv_name);
2694
2695         INIT_LIST_HEAD(&zombie);
2696         spin_lock(&svcpt->scp_lock);
2697         /* let the threads know that we would like them to stop asap */
2698         list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
2699                 CDEBUG(D_INFO, "Stopping thread %s #%u\n",
2700                        svcpt->scp_service->srv_thread_name, thread->t_id);
2701                 thread_add_flags(thread, SVC_STOPPING);
2702         }
2703
2704         wake_up_all(&svcpt->scp_waitq);
2705
2706         while (!list_empty(&svcpt->scp_threads)) {
2707                 thread = list_entry(svcpt->scp_threads.next,
2708                                         struct ptlrpc_thread, t_link);
2709                 if (thread_is_stopped(thread)) {
2710                         list_del(&thread->t_link);
2711                         list_add(&thread->t_link, &zombie);
2712                         continue;
2713                 }
2714                 spin_unlock(&svcpt->scp_lock);
2715
2716                 CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
2717                        svcpt->scp_service->srv_thread_name, thread->t_id);
2718                 l_wait_event(thread->t_ctl_waitq,
2719                              thread_is_stopped(thread), &lwi);
2720
2721                 spin_lock(&svcpt->scp_lock);
2722         }
2723
2724         spin_unlock(&svcpt->scp_lock);
2725
2726         while (!list_empty(&zombie)) {
2727                 thread = list_entry(zombie.next,
2728                                         struct ptlrpc_thread, t_link);
2729                 list_del(&thread->t_link);
2730                 OBD_FREE_PTR(thread);
2731         }
2732         EXIT;
2733 }
2734
2735 /**
2736  * Stops all threads of a particular service \a svc
2737  */
2738 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2739 {
2740         struct ptlrpc_service_part *svcpt;
2741         int                        i;
2742         ENTRY;
2743
2744         ptlrpc_service_for_each_part(svcpt, i, svc) {
2745                 if (svcpt->scp_service != NULL)
2746                         ptlrpc_svcpt_stop_threads(svcpt);
2747         }
2748
2749         EXIT;
2750 }
2751
2752 int ptlrpc_start_threads(struct ptlrpc_service *svc)
2753 {
2754         int     rc = 0;
2755         int     i;
2756         int     j;
2757         ENTRY;
2758
2759         /* We require 2 threads min, see note in ptlrpc_server_handle_request */
2760         LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
2761
2762         for (i = 0; i < svc->srv_ncpts; i++) {
2763                 for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
2764                         rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
2765                         if (rc == 0)
2766                                 continue;
2767
2768                         if (rc != -EMFILE)
2769                                 goto failed;
2770                         /* We have enough threads, don't start more. b=15759 */
2771                         break;
2772                 }
2773         }
2774
2775         RETURN(0);
2776  failed:
2777         CERROR("cannot start %s thread #%d_%d: rc %d\n",
2778                svc->srv_thread_name, i, j, rc);
2779         ptlrpc_stop_all_threads(svc);
2780         RETURN(rc);
2781 }
2782
2783 int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
2784 {
2785         struct l_wait_info      lwi = { 0 };
2786         struct ptlrpc_thread    *thread;
2787         struct ptlrpc_service   *svc;
2788         struct task_struct      *task;
2789         int                     rc;
2790         ENTRY;
2791
2792         LASSERT(svcpt != NULL);
2793
2794         svc = svcpt->scp_service;
2795
2796         CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
2797                svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
2798                svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
2799
2800  again:
2801         if (unlikely(svc->srv_is_stopping))
2802                 RETURN(-ESRCH);
2803
2804         if (!ptlrpc_threads_increasable(svcpt) ||
2805             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2806              svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
2807                 RETURN(-EMFILE);
2808
2809         OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
2810         if (thread == NULL)
2811                 RETURN(-ENOMEM);
2812         init_waitqueue_head(&thread->t_ctl_waitq);
2813
2814         spin_lock(&svcpt->scp_lock);
2815         if (!ptlrpc_threads_increasable(svcpt)) {
2816                 spin_unlock(&svcpt->scp_lock);
2817                 OBD_FREE_PTR(thread);
2818                 RETURN(-EMFILE);
2819         }
2820
2821         if (svcpt->scp_nthrs_starting != 0) {
2822                 /* serialize thread startup because some modules (e.g.
2823                  * obdfilter) require unique and contiguous t_ids */
2824                 LASSERT(svcpt->scp_nthrs_starting == 1);
2825                 spin_unlock(&svcpt->scp_lock);
2826                 OBD_FREE_PTR(thread);
2827                 if (wait) {
2828                         CDEBUG(D_INFO, "Waiting for thread %s #%d to be created\n",
2829                                svc->srv_thread_name, svcpt->scp_thr_nextid);
2830                         schedule();
2831                         goto again;
2832                 }
2833
2834                 CDEBUG(D_INFO, "Thread %s #%d creation race, retry later\n",
2835                        svc->srv_thread_name, svcpt->scp_thr_nextid);
2836                 RETURN(-EAGAIN);
2837         }
2838
2839         svcpt->scp_nthrs_starting++;
2840         thread->t_id = svcpt->scp_thr_nextid++;
2841         thread_add_flags(thread, SVC_STARTING);
2842         thread->t_svcpt = svcpt;
2843
2844         list_add(&thread->t_link, &svcpt->scp_threads);
2845         spin_unlock(&svcpt->scp_lock);
2846
2847         if (svcpt->scp_cpt >= 0) {
2848                 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
2849                          svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
2850         } else {
2851                 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
2852                          svc->srv_thread_name, thread->t_id);
2853         }
2854
2855         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
2856         task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
2857         if (IS_ERR(task)) {
2858                 rc = PTR_ERR(task);
2859                 CERROR("cannot start thread '%s': rc = %d\n",
2860                        thread->t_name, rc);
2861                 spin_lock(&svcpt->scp_lock);
2862                 --svcpt->scp_nthrs_starting;
2863                 if (thread_is_stopping(thread)) {
2864                         /* this ptlrpc_thread is being handled
2865                          * by ptlrpc_svcpt_stop_threads now
2866                          */
2867                         thread_add_flags(thread, SVC_STOPPED);
2868                         wake_up(&thread->t_ctl_waitq);
2869                         spin_unlock(&svcpt->scp_lock);
2870                 } else {
2871                         list_del(&thread->t_link);
2872                         spin_unlock(&svcpt->scp_lock);
2873                         OBD_FREE_PTR(thread);
2874                 }
2875                 RETURN(rc);
2876         }
2877
2878         if (!wait)
2879                 RETURN(0);
2880
2881         l_wait_event(thread->t_ctl_waitq,
2882                      thread_is_running(thread) || thread_is_stopped(thread),
2883                      &lwi);
2884
2885         rc = thread_is_stopped(thread) ? thread->t_id : 0;
2886         RETURN(rc);
2887 }
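/*
 * Note the two calling modes of ptlrpc_start_thread() (the calls below are
 * a usage sketch, not code from this file):
 *
 *	rc = ptlrpc_start_thread(svcpt, 1);
 *		waits on t_ctl_waitq until ptlrpc_main() is running (or
 *		has stopped), so a 0 return means a live thread;
 *
 *	rc = ptlrpc_start_thread(svcpt, 0);
 *		returns immediately and may fail with -EAGAIN if it loses
 *		the creation race; the caller can simply retry later.
 */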
2888
2889 int ptlrpc_hr_init(void)
2890 {
2891         struct ptlrpc_hr_partition      *hrp;
2892         struct ptlrpc_hr_thread         *hrt;
2893         int                             rc;
2894         int                             i;
2895         int                             j;
2896         int                             weight;
2897         ENTRY;
2898
2899         memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
2900         ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
2901
2902         ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
2903                                                    sizeof(*hrp));
2904         if (ptlrpc_hr.hr_partitions == NULL)
2905                 RETURN(-ENOMEM);
2906
2907         init_waitqueue_head(&ptlrpc_hr.hr_waitq);
2908
2909         weight = cfs_cpu_ht_nsiblings(0);
2910
2911         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2912                 hrp->hrp_cpt = i;
2913
2914                 atomic_set(&hrp->hrp_nstarted, 0);
2915                 atomic_set(&hrp->hrp_nstopped, 0);
2916
2917                 hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
2918                 hrp->hrp_nthrs /= weight;
2919                 if (hrp->hrp_nthrs == 0) /* CPT smaller than one core */
2920                         hrp->hrp_nthrs = 1;
2921                 OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
2922                               hrp->hrp_nthrs * sizeof(*hrt));
2923                 if (hrp->hrp_thrs == NULL)
2924                         GOTO(out, rc = -ENOMEM);
2925
2926                 for (j = 0; j < hrp->hrp_nthrs; j++) {
2927                         hrt = &hrp->hrp_thrs[j];
2928
2929                         hrt->hrt_id = j;
2930                         hrt->hrt_partition = hrp;
2931                         init_waitqueue_head(&hrt->hrt_waitq);
2932                         spin_lock_init(&hrt->hrt_lock);
2933                         INIT_LIST_HEAD(&hrt->hrt_queue);
2934                 }
2935         }
2936
2937         rc = ptlrpc_start_hr_threads();
2938 out:
2939         if (rc != 0)
2940                 ptlrpc_hr_fini();
2941         RETURN(rc);
2942 }
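/*
 * The sizing above aims for one reply handler per physical core: the CPT
 * weight (number of logical CPUs in the partition) is divided by the
 * hyper-thread sibling count.  Worked example with assumed values, not
 * values taken from this file: a CPT spanning 16 logical CPUs with 2 HT
 * siblings per core gets
 *
 *	weight = cfs_cpu_ht_nsiblings(0);                  e.g. 2
 *	hrp->hrp_nthrs = cfs_cpt_weight(cptab, i) / weight;     16 / 2 = 8
 *
 * reply-handling threads, and the floor of one thread covers partitions
 * smaller than a full core.
 */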
2943
2944 void ptlrpc_hr_fini(void)
2945 {
2946         struct ptlrpc_hr_partition      *hrp;
2947         int                             i;
2948
2949         if (ptlrpc_hr.hr_partitions == NULL)
2950                 return;
2951
2952         ptlrpc_stop_hr_threads();
2953
2954         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2955                 if (hrp->hrp_thrs != NULL) {
2956                         OBD_FREE(hrp->hrp_thrs,
2957                                  hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
2958                 }
2959         }
2960
2961         cfs_percpt_free(ptlrpc_hr.hr_partitions);
2962         ptlrpc_hr.hr_partitions = NULL;
2963 }
2964
2965
2966 /**
2967  * Wait until all already scheduled replies are processed.
2968  */
2969 static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
2970 {
2971         while (1) {
2972                 int rc;
2973                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
2974                                                      NULL, NULL);
2975
2976                 rc = l_wait_event(svcpt->scp_waitq,
2977                      atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
2978                 if (rc == 0)
2979                         break;
2980                 CWARN("Unexpectedly long wait for difficult replies: %s %p\n",
2981                       svcpt->scp_service->srv_name, svcpt->scp_service);
2982         }
2983 }
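/*
 * This is the usual Lustre "wait forever but complain" idiom: a bounded
 * l_wait_event() inside a loop, so a stuck condition produces a console
 * warning every cycle instead of a silent hang.  Generic sketch (waitq,
 * condition and message are placeholders):
 *
 *	for (;;) {
 *		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
 *						     NULL, NULL);
 *		if (l_wait_event(waitq, condition, &lwi) == 0)
 *			break;
 *		CWARN("still waiting for condition\n");
 *	}
 */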
2984
2985 static void
2986 ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
2987 {
2988         struct ptlrpc_service_part      *svcpt;
2989         int                             i;
2990
2991         /* early disarm AT timer... */
2992         ptlrpc_service_for_each_part(svcpt, i, svc) {
2993                 if (svcpt->scp_service != NULL)
2994                         cfs_timer_disarm(&svcpt->scp_at_timer);
2995         }
2996 }
2997
2998 static void
2999 ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
3000 {
3001         struct ptlrpc_service_part        *svcpt;
3002         struct ptlrpc_request_buffer_desc *rqbd;
3003         struct l_wait_info                lwi;
3004         int                               rc;
3005         int                               i;
3006
3007         /* All history will be culled when the next request buffer is
3008          * freed in ptlrpc_service_purge_all() */
3009         svc->srv_hist_nrqbds_cpt_max = 0;
3010
3011         rc = LNetClearLazyPortal(svc->srv_req_portal);
3012         LASSERT(rc == 0);
3013
3014         ptlrpc_service_for_each_part(svcpt, i, svc) {
3015                 if (svcpt->scp_service == NULL)
3016                         break;
3017
3018                 /* Unlink all the request buffers.  This forces a 'final'
3019                  * event with its 'unlink' flag set for each posted rqbd */
3020                 list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
3021                                         rqbd_list) {
3022                         rc = LNetMDUnlink(rqbd->rqbd_md_h);
3023                         LASSERT(rc == 0 || rc == -ENOENT);
3024                 }
3025         }
3026
3027         ptlrpc_service_for_each_part(svcpt, i, svc) {
3028                 if (svcpt->scp_service == NULL)
3029                         break;
3030
3031                 /* Wait for the network to release any buffers
3032                  * it's currently filling */
3033                 spin_lock(&svcpt->scp_lock);
3034                 while (svcpt->scp_nrqbds_posted != 0) {
3035                         spin_unlock(&svcpt->scp_lock);
3036                         /* Network access will complete in finite time but
3037                          * the HUGE timeout lets us CWARN for visibility
3038                          * of sluggish NALs */
3039                         lwi = LWI_TIMEOUT_INTERVAL(
3040                                         cfs_time_seconds(LONG_UNLINK),
3041                                         cfs_time_seconds(1), NULL, NULL);
3042                         rc = l_wait_event(svcpt->scp_waitq,
3043                                           svcpt->scp_nrqbds_posted == 0, &lwi);
3044                         if (rc == -ETIMEDOUT) {
3045                                 CWARN("Service %s waiting for "
3046                                       "request buffers\n",
3047                                       svcpt->scp_service->srv_name);
3048                         }
3049                         spin_lock(&svcpt->scp_lock);
3050                 }
3051                 spin_unlock(&svcpt->scp_lock);
3052         }
3053 }
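/*
 * Unlinking is two-phase: LNetMDUnlink() only requests the unlink, and a
 * buffer is finally released when LNet delivers its last event with the
 * unlink flag set, which is what eventually drops scp_nrqbds_posted to
 * zero.  The second loop therefore waits instead of freeing.  Condensed
 * sketch of the first phase (error handling trimmed; -ENOENT just means
 * the MD was already gone):
 *
 *	list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, rqbd_list)
 *		rc = LNetMDUnlink(rqbd->rqbd_md_h);
 */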
3054
3055 static void
3056 ptlrpc_service_purge_all(struct ptlrpc_service *svc)
3057 {
3058         struct ptlrpc_service_part              *svcpt;
3059         struct ptlrpc_request_buffer_desc       *rqbd;
3060         struct ptlrpc_request                   *req;
3061         struct ptlrpc_reply_state               *rs;
3062         int                                     i;
3063
3064         ptlrpc_service_for_each_part(svcpt, i, svc) {
3065                 if (svcpt->scp_service == NULL)
3066                         break;
3067
3068                 spin_lock(&svcpt->scp_rep_lock);
3069                 while (!list_empty(&svcpt->scp_rep_active)) {
3070                         rs = list_entry(svcpt->scp_rep_active.next,
3071                                             struct ptlrpc_reply_state, rs_list);
3072                         spin_lock(&rs->rs_lock);
3073                         ptlrpc_schedule_difficult_reply(rs);
3074                         spin_unlock(&rs->rs_lock);
3075                 }
3076                 spin_unlock(&svcpt->scp_rep_lock);
3077
3078                 /* purge the request queue.  NB No new replies (rqbds
3079                  * all unlinked) and no service threads, so I'm the only
3080                  * thread noodling the request queue now */
3081                 while (!list_empty(&svcpt->scp_req_incoming)) {
3082                         req = list_entry(svcpt->scp_req_incoming.next,
3083                                              struct ptlrpc_request, rq_list);
3084
3085                         list_del(&req->rq_list);
3086                         svcpt->scp_nreqs_incoming--;
3087                         ptlrpc_server_finish_request(svcpt, req);
3088                 }
3089
3090                 while (ptlrpc_server_request_pending(svcpt, true)) {
3091                         req = ptlrpc_server_request_get(svcpt, true);
3092                         ptlrpc_server_finish_active_request(svcpt, req);
3093                 }
3094
3095                 LASSERT(list_empty(&svcpt->scp_rqbd_posted));
3096                 LASSERT(svcpt->scp_nreqs_incoming == 0);
3097                 LASSERT(svcpt->scp_nreqs_active == 0);
3098                 /* history should have been culled by
3099                  * ptlrpc_server_finish_request */
3100                 LASSERT(svcpt->scp_hist_nrqbds == 0);
3101
3102                 /* Now free all the request buffers since nothing
3103                  * references them any more... */
3104
3105                 while (!list_empty(&svcpt->scp_rqbd_idle)) {
3106                         rqbd = list_entry(svcpt->scp_rqbd_idle.next,
3107                                               struct ptlrpc_request_buffer_desc,
3108                                               rqbd_list);
3109                         ptlrpc_free_rqbd(rqbd);
3110                 }
3111                 ptlrpc_wait_replies(svcpt);
3112
3113                 while (!list_empty(&svcpt->scp_rep_idle)) {
3114                         rs = list_entry(svcpt->scp_rep_idle.next,
3115                                             struct ptlrpc_reply_state,
3116                                             rs_list);
3117                         list_del(&rs->rs_list);
3118                         OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
3119                 }
3120         }
3121 }
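/*
 * The purge order above is significant:
 *
 *	1. schedule every active difficult reply so the reply handler
 *	   (HR) threads can retire it;
 *	2. drop requests still queued in scp_req_incoming;
 *	3. drain whatever the NRS policies still hold;
 *	4. free the idle rqbds, which also culls the request history;
 *	5. wait until the replies from step 1 are fully processed;
 *	6. free the idle reply states.
 *
 * Step 5 is what synchronizes with the HR threads before the reply
 * states are freed in step 6.
 */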
3122
3123 static void
3124 ptlrpc_service_free(struct ptlrpc_service *svc)
3125 {
3126         struct ptlrpc_service_part      *svcpt;
3127         struct ptlrpc_at_array          *array;
3128         int                             i;
3129
3130         ptlrpc_service_for_each_part(svcpt, i, svc) {
3131                 if (svcpt->scp_service == NULL)
3132                         break;
3133
3134                 /* In case somebody rearmed this in the meantime */
3135                 cfs_timer_disarm(&svcpt->scp_at_timer);
3136                 array = &svcpt->scp_at_array;
3137
3138                 if (array->paa_reqs_array != NULL) {
3139                         OBD_FREE(array->paa_reqs_array,
3140                                  sizeof(struct list_head) * array->paa_size);
3141                         array->paa_reqs_array = NULL;
3142                 }
3143
3144                 if (array->paa_reqs_count != NULL) {
3145                         OBD_FREE(array->paa_reqs_count,
3146                                  sizeof(__u32) * array->paa_size);
3147                         array->paa_reqs_count = NULL;
3148                 }
3149         }
3150
3151         ptlrpc_service_for_each_part(svcpt, i, svc)
3152                 OBD_FREE_PTR(svcpt);
3153
3154         if (svc->srv_cpts != NULL)
3155                 cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
3156
3157         OBD_FREE(svc, offsetof(struct ptlrpc_service,
3158                                srv_parts[svc->srv_ncpts]));
3159 }
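/*
 * The service descriptor is a flexible-array allocation, so its size must
 * be recomputed with offsetof() at free time exactly as it was at
 * allocation time.  Generic shape of the idiom (struct and field names
 * below are illustrative):
 *
 *	struct obj {
 *		int              o_ncpts;
 *		struct obj_part *o_parts[0];
 *	};
 *
 *	OBD_ALLOC(obj, offsetof(struct obj, o_parts[ncpts]));
 *	...
 *	OBD_FREE(obj, offsetof(struct obj, o_parts[ncpts]));
 */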
3160
3161 int ptlrpc_unregister_service(struct ptlrpc_service *service)
3162 {
3163         ENTRY;
3164
3165         CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
3166
3167         service->srv_is_stopping = 1;
3168
3169         mutex_lock(&ptlrpc_all_services_mutex);
3170         list_del_init(&service->srv_list);
3171         mutex_unlock(&ptlrpc_all_services_mutex);
3172
3173         ptlrpc_service_del_atimer(service);
3174         ptlrpc_stop_all_threads(service);
3175
3176         ptlrpc_service_unlink_rqbd(service);
3177         ptlrpc_service_purge_all(service);
3178         ptlrpc_service_nrs_cleanup(service);
3179
3180         ptlrpc_lprocfs_unregister_service(service);
3181
3182         ptlrpc_service_free(service);
3183
3184         RETURN(0);
3185 }
3186 EXPORT_SYMBOL(ptlrpc_unregister_service);
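/*
 * Typical service lifetime as seen by a target, shown only as an
 * illustrative sketch (the registration arguments are abbreviated and the
 * variable names are not from this file):
 *
 *	svc = ptlrpc_register_service(&conf, proc_entry);
 *	if (IS_ERR(svc))
 *		RETURN(PTR_ERR(svc));
 *	...
 *	ptlrpc_unregister_service(svc);
 *
 * Note that ptlrpc_unregister_service() frees @svc itself, so the pointer
 * must not be used afterwards.
 */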
3187
3188 /**
3189  * Returns 0 if the service is healthy.
3190  *
3191  * Right now, it just checks to make sure that requests aren't languishing
3192  * in the queue.  We'll use this health check to govern whether a node needs
3193  * to be shot, so it's intentionally non-aggressive. */
3194 static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
3195 {
3196         struct ptlrpc_request           *request = NULL;
3197         struct timeval                  right_now;
3198         long                            timediff;
3199
3200         do_gettimeofday(&right_now);
3201
3202         spin_lock(&svcpt->scp_req_lock);
3203         /* How long has the next entry been waiting? */
3204         if (ptlrpc_server_high_pending(svcpt, true))
3205                 request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
3206         else if (ptlrpc_server_normal_pending(svcpt, true))
3207                 request = ptlrpc_nrs_req_peek_nolock(svcpt, false);
3208
3209         if (request == NULL) {
3210                 spin_unlock(&svcpt->scp_req_lock);
3211                 return 0;
3212         }
3213
3214         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
3215         spin_unlock(&svcpt->scp_req_lock);
3216
3217         if ((timediff / ONE_MILLION) >
3218             (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
3219                 CERROR("%s: unhealthy - request has been waiting %lds\n",
3220                        svcpt->scp_service->srv_name, timediff / ONE_MILLION);
3221                 return -1;
3222         }
3223
3224         return 0;
3225 }
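/*
 * Worked example of the threshold above, using the usual module defaults
 * rather than values taken from this file: with adaptive timeouts off and
 * obd_timeout = 100s, the oldest queued request may wait 150s before the
 * partition is reported unhealthy; with AT on and at_max = 600s the limit
 * is 600s.  timediff is in microseconds, hence the division by
 * ONE_MILLION before the comparison.
 */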
3226
3227 int
3228 ptlrpc_service_health_check(struct ptlrpc_service *svc)
3229 {
3230         struct ptlrpc_service_part      *svcpt;
3231         int                             i;
3232
3233         if (svc == NULL)
3234                 return 0;
3235
3236         ptlrpc_service_for_each_part(svcpt, i, svc) {
3237                 int rc = ptlrpc_svcpt_health_check(svcpt);
3238
3239                 if (rc != 0)
3240                         return rc;
3241         }
3242         return 0;
3243 }
3244 EXPORT_SYMBOL(ptlrpc_service_health_check);
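/*
 * Illustrative (hypothetical) caller: a watchdog sweeping every registered
 * service and logging the unhealthy ones.
 *
 *	struct ptlrpc_service *svc;
 *
 *	mutex_lock(&ptlrpc_all_services_mutex);
 *	list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
 *		if (ptlrpc_service_health_check(svc) != 0)
 *			CERROR("%s: service unhealthy\n", svc->srv_name);
 *	}
 *	mutex_unlock(&ptlrpc_all_services_mutex);
 */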