lustre/ptlrpc/service.c (fs/lustre-release.git, commit 1893f81a9f104ce5e69ca68c537a801f7395faaf)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #define DEBUG_SUBSYSTEM S_RPC
38 #ifndef __KERNEL__
39 #include <liblustre.h>
40 #endif
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_net.h>
44 #include <lu_object.h>
45 #include <lnet/types.h>
46 #include "ptlrpc_internal.h"
47
48 /* The following are visible and mutable through /sys/module/ptlrpc */
49 int test_req_buffer_pressure = 0;
50 CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
51                 "set non-zero to put pressure on request buffer pools");
52 CFS_MODULE_PARM(at_min, "i", int, 0644,
53                 "Adaptive timeout minimum (sec)");
54 CFS_MODULE_PARM(at_max, "i", int, 0644,
55                 "Adaptive timeout maximum (sec)");
56 CFS_MODULE_PARM(at_history, "i", int, 0644,
57                 "Adaptive timeouts remember the slowest event that took place "
58                 "within this period (sec)");
59 CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
60                 "How soon before an RPC deadline to send an early reply");
61 CFS_MODULE_PARM(at_extra, "i", int, 0644,
62                 "How much extra time to give with each early reply");
63
64
65 /* forward ref */
66 static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);
67 static void ptlrpc_hpreq_fini(struct ptlrpc_request *req);
68
69 static CFS_LIST_HEAD(ptlrpc_all_services);
70 cfs_spinlock_t ptlrpc_all_services_lock;
71
72 struct ptlrpc_request_buffer_desc *
73 ptlrpc_alloc_rqbd (struct ptlrpc_service *svc)
74 {
75         struct ptlrpc_request_buffer_desc *rqbd;
76
77         OBD_ALLOC_PTR(rqbd);
78         if (rqbd == NULL)
79                 return (NULL);
80
81         rqbd->rqbd_service = svc;
82         rqbd->rqbd_refcount = 0;
83         rqbd->rqbd_cbid.cbid_fn = request_in_callback;
84         rqbd->rqbd_cbid.cbid_arg = rqbd;
85         CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
86         OBD_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_buf_size);
87
88         if (rqbd->rqbd_buffer == NULL) {
89                 OBD_FREE_PTR(rqbd);
90                 return (NULL);
91         }
92
93         cfs_spin_lock(&svc->srv_lock);
94         cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
95         svc->srv_nbufs++;
96         cfs_spin_unlock(&svc->srv_lock);
97
98         return (rqbd);
99 }
100
101 void
102 ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
103 {
104         struct ptlrpc_service *svc = rqbd->rqbd_service;
105
106         LASSERT (rqbd->rqbd_refcount == 0);
107         LASSERT (cfs_list_empty(&rqbd->rqbd_reqs));
108
109         cfs_spin_lock(&svc->srv_lock);
110         cfs_list_del(&rqbd->rqbd_list);
111         svc->srv_nbufs--;
112         cfs_spin_unlock(&svc->srv_lock);
113
114         OBD_FREE_LARGE(rqbd->rqbd_buffer, svc->srv_buf_size);
115         OBD_FREE_PTR(rqbd);
116 }
117
118 int
119 ptlrpc_grow_req_bufs(struct ptlrpc_service *svc)
120 {
121         struct ptlrpc_request_buffer_desc *rqbd;
122         int                                rc = 0;
123         int                                i;
124
125         for (i = 0; i < svc->srv_nbuf_per_group; i++) {
126                 /* NB: another thread might be doing this as well, we need to
127                  * make sure that it wouldn't over-allocate, see LU-1212. */
128                 if (svc->srv_nrqbd_receiving >= svc->srv_nbuf_per_group)
129                         break;
130
131                 rqbd = ptlrpc_alloc_rqbd(svc);
132
133                 if (rqbd == NULL) {
134                         CERROR("%s: Can't allocate request buffer\n",
135                                svc->srv_name);
136                         rc = -ENOMEM;
137                         break;
138                 }
139
140                 if (ptlrpc_server_post_idle_rqbds(svc) < 0) {
141                         rc = -EAGAIN;
142                         break;
143                 }
144         }
145
146         CDEBUG(D_RPCTRACE,
147                "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
148                svc->srv_name, i, svc->srv_buf_size,
149                svc->srv_nrqbd_receiving, svc->srv_nbufs, rc);
150
151         return rc;
152 }
153
154 /**
155  * Part of Rep-Ack logic.
156  * Puts a lock and its mode into the reply state associated with the request's reply.
157  */
158 void
159 ptlrpc_save_lock(struct ptlrpc_request *req,
160                  struct lustre_handle *lock, int mode, int no_ack)
161 {
162         struct ptlrpc_reply_state *rs = req->rq_reply_state;
163         int                        idx;
164
165         LASSERT(rs != NULL);
166         LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
167
168         if (req->rq_export->exp_disconnected) {
169                 ldlm_lock_decref(lock, mode);
170         } else {
171                 idx = rs->rs_nlocks++;
172                 rs->rs_locks[idx] = *lock;
173                 rs->rs_modes[idx] = mode;
174                 rs->rs_difficult = 1;
175                 rs->rs_no_ack = !!no_ack;
176         }
177 }
178
179 #ifdef __KERNEL__
180
181 #define HRT_RUNNING 0
182 #define HRT_STOPPING 1
183
184 struct ptlrpc_hr_thread {
185         cfs_spinlock_t        hrt_lock;
186         unsigned long         hrt_flags;
187         cfs_waitq_t           hrt_wait;
188         cfs_list_t            hrt_queue;
189         cfs_completion_t      hrt_completion;
190 };
191
192 struct ptlrpc_hr_service {
193         int                     hr_index;
194         int                     hr_n_threads;
195         int                     hr_size;
196         struct ptlrpc_hr_thread hr_threads[0];
197 };
198
199 struct rs_batch {
200         cfs_list_t              rsb_replies;
201         struct ptlrpc_service  *rsb_svc;
202         unsigned int            rsb_n_replies;
203 };
204
205 /**
206  * A pointer to the per-node reply handling service.
207  */
208 static struct ptlrpc_hr_service *ptlrpc_hr = NULL;
209
210 /**
211  * maximum number of replies scheduled in one batch
212  */
213 #define MAX_SCHEDULED 256
214
215 /**
216  * Initialize a reply batch.
217  *
218  * \param b batch
219  */
220 static void rs_batch_init(struct rs_batch *b)
221 {
222         memset(b, 0, sizeof *b);
223         CFS_INIT_LIST_HEAD(&b->rsb_replies);
224 }
225
226 /**
227  * Choose a reply handling (hr) thread to dispatch replies to.
228  */
229 static unsigned int get_hr_thread_index(struct ptlrpc_hr_service *hr)
230 {
231         unsigned int idx;
232
233         /* Concurrent modification of hr_index without any spinlock
234            protection is harmless as long as the result fits the
235            [0..(hr_n_threads-1)] range and each thread gets a near-equal
236            load. */
237         idx = hr->hr_index;
238         hr->hr_index = (idx >= hr->hr_n_threads - 1) ? 0 : idx + 1;
239         return idx;
240 }
241
242 /**
243  * Dispatch all replies accumulated in the batch to one of the
244  * dedicated reply handling threads.
245  *
246  * \param b batch
247  */
248 static void rs_batch_dispatch(struct rs_batch *b)
249 {
250         if (b->rsb_n_replies != 0) {
251                 struct ptlrpc_hr_service *hr = ptlrpc_hr;
252                 int idx;
253
254                 idx = get_hr_thread_index(hr);
255
256                 cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
257                 cfs_list_splice_init(&b->rsb_replies,
258                                      &hr->hr_threads[idx].hrt_queue);
259                 cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
260                 cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
261                 b->rsb_n_replies = 0;
262         }
263 }
264
265 /**
266  * Add a reply to a batch.
267  * Add one reply object to the batch; if the batch is full or the service
268  * changes, the accumulated replies are dispatched first.
268  *
269  * \param b batch
270  * \param rs reply
271  */
272 static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
273 {
274         struct ptlrpc_service *svc = rs->rs_service;
275
276         if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
277                 if (b->rsb_svc != NULL) {
278                         rs_batch_dispatch(b);
279                         cfs_spin_unlock(&b->rsb_svc->srv_rs_lock);
280                 }
281                 cfs_spin_lock(&svc->srv_rs_lock);
282                 b->rsb_svc = svc;
283         }
284         cfs_spin_lock(&rs->rs_lock);
285         rs->rs_scheduled_ever = 1;
286         if (rs->rs_scheduled == 0) {
287                 cfs_list_move(&rs->rs_list, &b->rsb_replies);
288                 rs->rs_scheduled = 1;
289                 b->rsb_n_replies++;
290         }
291         rs->rs_committed = 1;
292         cfs_spin_unlock(&rs->rs_lock);
293 }
294
295 /**
296  * Reply batch finalization.
297  * Dispatch any remaining replies from the batch
298  * and release the service spinlock if it is still held.
299  *
300  * \param b batch
301  */
302 static void rs_batch_fini(struct rs_batch *b)
303 {
304         if (b->rsb_svc != NULL) {
305                 rs_batch_dispatch(b);
306                 cfs_spin_unlock(&b->rsb_svc->srv_rs_lock);
307         }
308 }
309
310 #define DECLARE_RS_BATCH(b)     struct rs_batch b
311
312 #else /* __KERNEL__ */
313
314 #define rs_batch_init(b)        do{}while(0)
315 #define rs_batch_fini(b)        do{}while(0)
316 #define rs_batch_add(b, r)      ptlrpc_schedule_difficult_reply(r)
317 #define DECLARE_RS_BATCH(b)
318
319 #endif /* __KERNEL__ */
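/*
 * Minimal usage sketch for the rs_batch helpers above (illustrative only;
 * it mirrors the pattern ptlrpc_commit_replies() below follows):
 *
 *      DECLARE_RS_BATCH(batch);
 *
 *      rs_batch_init(&batch);
 *      // for each committed reply state "rs":
 *      rs_batch_add(&batch, rs);
 *      // dispatch anything left over and drop the service lock:
 *      rs_batch_fini(&batch);
 */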
320
321 /**
322  * Put the reply state into a queue for processing because we received
323  * an ACK from the client.
324  */
325 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
326 {
327 #ifdef __KERNEL__
328         struct ptlrpc_hr_service *hr = ptlrpc_hr;
329         int idx;
330         ENTRY;
331
332         LASSERT(cfs_list_empty(&rs->rs_list));
333
334         idx = get_hr_thread_index(hr);
335         cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
336         cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
337         cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
338         cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
339         EXIT;
340 #else
341         cfs_list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
342 #endif
343 }
344
345 void
346 ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
347 {
348         ENTRY;
349
350         LASSERT_SPIN_LOCKED(&rs->rs_service->srv_rs_lock);
351         LASSERT_SPIN_LOCKED(&rs->rs_lock);
352         LASSERT (rs->rs_difficult);
353         rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
354
355         if (rs->rs_scheduled) {     /* being set up or already notified */
356                 EXIT;
357                 return;
358         }
359
360         rs->rs_scheduled = 1;
361         cfs_list_del_init(&rs->rs_list);
362         ptlrpc_dispatch_difficult_reply(rs);
363         EXIT;
364 }
365
366 void ptlrpc_commit_replies(struct obd_export *exp)
367 {
368         struct ptlrpc_reply_state *rs, *nxt;
369         DECLARE_RS_BATCH(batch);
370         ENTRY;
371
372         rs_batch_init(&batch);
373         /* Find any replies that have been committed and schedule them so
374          * that their service can complete them. */
375
376         /* CAVEAT EMPTOR: spinlock ordering!!! */
377         cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
378         cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
379                                      rs_obd_list) {
380                 LASSERT (rs->rs_difficult);
381                 /* VBR: per-export last_committed */
382                 LASSERT(rs->rs_export);
383                 if (rs->rs_transno <= exp->exp_last_committed) {
384                         cfs_list_del_init(&rs->rs_obd_list);
385                         rs_batch_add(&batch, rs);
386                 }
387         }
388         cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
389         rs_batch_fini(&batch);
390         EXIT;
391 }
392
393 static int
394 ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
395 {
396         struct ptlrpc_request_buffer_desc *rqbd;
397         int                                rc;
398         int                                posted = 0;
399
400         for (;;) {
401                 cfs_spin_lock(&svc->srv_lock);
402
403                 if (cfs_list_empty (&svc->srv_idle_rqbds)) {
404                         cfs_spin_unlock(&svc->srv_lock);
405                         return (posted);
406                 }
407
408                 rqbd = cfs_list_entry(svc->srv_idle_rqbds.next,
409                                       struct ptlrpc_request_buffer_desc,
410                                       rqbd_list);
411                 cfs_list_del (&rqbd->rqbd_list);
412
413                 /* assume we will post successfully */
414                 svc->srv_nrqbd_receiving++;
415                 cfs_list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
416
417                 cfs_spin_unlock(&svc->srv_lock);
418
419                 rc = ptlrpc_register_rqbd(rqbd);
420                 if (rc != 0)
421                         break;
422
423                 posted = 1;
424         }
425
426         cfs_spin_lock(&svc->srv_lock);
427
428         svc->srv_nrqbd_receiving--;
429         cfs_list_del(&rqbd->rqbd_list);
430         cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
431
432         /* Don't complain if no request buffers are posted right now; LNET
433          * won't drop requests because we set the portal lazy! */
434
435         cfs_spin_unlock(&svc->srv_lock);
436
437         return (-1);
438 }
439
440 /**
441  * Start a service with parameters from struct ptlrpc_service_conf \a c
442  * instead of calling ptlrpc_init_svc() directly with its long argument list.
443  */
444 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
445                                             svc_handler_t h, char *name,
446                                             struct proc_dir_entry *proc_entry,
447                                             svc_req_printfn_t prntfn,
448                                             char *threadname)
449 {
450         return ptlrpc_init_svc(c->psc_nbufs, c->psc_bufsize,
451                                c->psc_max_req_size, c->psc_max_reply_size,
452                                c->psc_req_portal, c->psc_rep_portal,
453                                c->psc_watchdog_factor,
454                                h, name, proc_entry,
455                                prntfn, c->psc_min_threads, c->psc_max_threads,
456                                threadname, c->psc_ctx_tags, NULL);
457 }
458 EXPORT_SYMBOL(ptlrpc_init_svc_conf);
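/*
 * Illustrative sketch of a caller (names and values here are hypothetical,
 * not taken from this file): fill a struct ptlrpc_service_conf once and let
 * ptlrpc_init_svc_conf() forward the fields to ptlrpc_init_svc().
 *
 *      struct ptlrpc_service_conf conf = {
 *              .psc_nbufs           = ...,
 *              .psc_bufsize         = ...,
 *              .psc_max_req_size    = ...,
 *              .psc_max_reply_size  = ...,
 *              .psc_req_portal      = ...,
 *              .psc_rep_portal      = ...,
 *              .psc_watchdog_factor = ...,
 *              .psc_min_threads     = ...,
 *              .psc_max_threads     = ...,
 *              .psc_ctx_tags        = ...,
 *      };
 *      svc = ptlrpc_init_svc_conf(&conf, my_handler, "my_service",
 *                                 my_proc_entry, my_req_printfn,
 *                                 "my_svc_thread");
 */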
459
460 static void ptlrpc_at_timer(unsigned long castmeharder)
461 {
462         struct ptlrpc_service *svc = (struct ptlrpc_service *)castmeharder;
463         svc->srv_at_check = 1;
464         svc->srv_at_checktime = cfs_time_current();
465         cfs_waitq_signal(&svc->srv_waitq);
466 }
467
468 /**
469  * Initialize service on a given portal.
470  * This includes starting service threads, allocating and posting rqbds, and
471  * so on.
472  * \a nbufs is how many buffers to post
473  * \a bufsize is buffer size to post
474  * \a max_req_size - maximum request size to be accepted for this service
475  * \a max_reply_size maximum reply size this service can ever send
476  * \a req_portal - portal to listen for requests on
477  * \a rep_portal - portal of where to send replies to
478  * \a watchdog_factor - soft watchdog timeout multiplier used to print stuck service traces.
479  * \a handler - function to process every new request
480  * \a name - service name
481  * \a proc_entry - entry in the /proc tree for statistics reporting
482  * \a min_threads \a max_threads - min/max number of service threads to start.
483  * \a threadname should be 11 characters or less - 3 will be added on
484  * \a hp_handler - function to determine priority of the request, also called
485  *                 on every new request.
486  */
487 struct ptlrpc_service *
488 ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
489                 int req_portal, int rep_portal, int watchdog_factor,
490                 svc_handler_t handler, char *name,
491                 cfs_proc_dir_entry_t *proc_entry,
492                 svc_req_printfn_t svcreq_printfn,
493                 int min_threads, int max_threads,
494                 char *threadname, __u32 ctx_tags,
495                 svc_hpreq_handler_t hp_handler)
496 {
497         int                     rc;
498         struct ptlrpc_at_array *array;
499         struct ptlrpc_service  *service;
500         unsigned int            size, index;
501         ENTRY;
502
503         LASSERT (nbufs > 0);
504         LASSERT (bufsize >= max_req_size + SPTLRPC_MAX_PAYLOAD);
505         LASSERT (ctx_tags != 0);
506
507         OBD_ALLOC_PTR(service);
508         if (service == NULL)
509                 RETURN(NULL);
510
511         /* First initialise enough for early teardown */
512
513         service->srv_name = name;
514         cfs_spin_lock_init(&service->srv_lock);
515         cfs_spin_lock_init(&service->srv_rq_lock);
516         cfs_spin_lock_init(&service->srv_rs_lock);
517         CFS_INIT_LIST_HEAD(&service->srv_threads);
518         cfs_waitq_init(&service->srv_waitq);
519
520         service->srv_nbuf_per_group = test_req_buffer_pressure ? 1 : nbufs;
521         service->srv_max_req_size = max_req_size + SPTLRPC_MAX_PAYLOAD;
522         service->srv_buf_size = bufsize;
523         service->srv_rep_portal = rep_portal;
524         service->srv_req_portal = req_portal;
525         service->srv_watchdog_factor = watchdog_factor;
526         service->srv_handler = handler;
527         service->srv_req_printfn = svcreq_printfn;
528         service->srv_request_seq = 1;           /* valid seq #s start at 1 */
529         service->srv_request_max_cull_seq = 0;
530         service->srv_threads_min = min_threads;
531         service->srv_threads_max = max_threads;
532         service->srv_thread_name = threadname;
533         service->srv_ctx_tags = ctx_tags;
534         service->srv_hpreq_handler = hp_handler;
535         service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
536         service->srv_hpreq_count = 0;
537         service->srv_n_active_hpreq = 0;
538
539         rc = LNetSetLazyPortal(service->srv_req_portal);
540         LASSERT (rc == 0);
541
542         CFS_INIT_LIST_HEAD(&service->srv_request_queue);
543         CFS_INIT_LIST_HEAD(&service->srv_request_hpq);
544         CFS_INIT_LIST_HEAD(&service->srv_idle_rqbds);
545         CFS_INIT_LIST_HEAD(&service->srv_active_rqbds);
546         CFS_INIT_LIST_HEAD(&service->srv_history_rqbds);
547         CFS_INIT_LIST_HEAD(&service->srv_request_history);
548         CFS_INIT_LIST_HEAD(&service->srv_active_replies);
549 #ifndef __KERNEL__
550         CFS_INIT_LIST_HEAD(&service->srv_reply_queue);
551 #endif
552         CFS_INIT_LIST_HEAD(&service->srv_free_rs_list);
553         cfs_waitq_init(&service->srv_free_rs_waitq);
554         cfs_atomic_set(&service->srv_n_difficult_replies, 0);
555
556         cfs_spin_lock_init(&service->srv_at_lock);
557         CFS_INIT_LIST_HEAD(&service->srv_req_in_queue);
558
559         array = &service->srv_at_array;
560         size = at_est2timeout(at_max);
561         array->paa_size = size;
562         array->paa_count = 0;
563         array->paa_deadline = -1;
564
565         /* allocate memory for srv_at_array (ptlrpc_at_array) */
566         OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
567         if (array->paa_reqs_array == NULL)
568                 GOTO(failed, NULL);
569
570         for (index = 0; index < size; index++)
571                 CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
572
573         OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
574         if (array->paa_reqs_count == NULL)
575                 GOTO(failed, NULL);
576
577         cfs_timer_init(&service->srv_at_timer, ptlrpc_at_timer, service);
578         /* At SOW, service time should be quick; 10s seems generous. If client
579            timeout is less than this, we'll be sending an early reply. */
580         at_init(&service->srv_at_estimate, 10, 0);
581
582         cfs_spin_lock (&ptlrpc_all_services_lock);
583         cfs_list_add (&service->srv_list, &ptlrpc_all_services);
584         cfs_spin_unlock (&ptlrpc_all_services_lock);
585
586         /* Now allocate the request buffers */
587         rc = ptlrpc_grow_req_bufs(service);
588         /* We shouldn't be under memory pressure at startup, so
589          * fail if we can't post all our buffers at this time. */
590         if (rc != 0)
591                 GOTO(failed, NULL);
592
593         /* Now allocate pool of reply buffers */
594         /* Increase max reply size to next power of two */
595         service->srv_max_reply_size = 1;
596         while (service->srv_max_reply_size <
597                max_reply_size + SPTLRPC_MAX_PAYLOAD)
598                 service->srv_max_reply_size <<= 1;
599
600         if (proc_entry != NULL)
601                 ptlrpc_lprocfs_register_service(proc_entry, service);
602
603         CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
604                service->srv_name, service->srv_req_portal);
605
606         RETURN(service);
607 failed:
608         ptlrpc_unregister_service(service);
609         return NULL;
610 }
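/*
 * Worked example for the reply-buffer sizing above (numbers are
 * hypothetical): if max_reply_size + SPTLRPC_MAX_PAYLOAD came to 9000
 * bytes, the loop in ptlrpc_init_svc() doubles srv_max_reply_size from 1
 * (1, 2, 4, ... 8192) until it reaches 16384, the next power of two.
 */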
611
612 /**
613  * To actually free the request; must be called without holding svc_lock.
614  * Note it is the caller's responsibility to unlink req->rq_list.
615  */
616 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
617 {
618         LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
619         LASSERT(cfs_list_empty(&req->rq_timed_list));
620
621          /* DEBUG_REQ() assumes the reply state of a request with a valid
622           * ref will not be destroyed until that reference is dropped. */
623         ptlrpc_req_drop_rs(req);
624
625         sptlrpc_svc_ctx_decref(req);
626
627         if (req != &req->rq_rqbd->rqbd_req) {
628                 /* NB request buffers use an embedded
629                  * req if the incoming req unlinked the
630                  * MD; this isn't one of them! */
631                 OBD_FREE(req, sizeof(*req));
632         }
633 }
634
635 /**
636  * Drop a reference count of the request. If it reaches 0, we either
637  * put it into the history list or free it immediately.
638  */
639 void ptlrpc_server_drop_request(struct ptlrpc_request *req)
640 {
641         struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
642         struct ptlrpc_service             *svc = rqbd->rqbd_service;
643         int                                refcount;
644         cfs_list_t                        *tmp;
645         cfs_list_t                        *nxt;
646
647         if (!cfs_atomic_dec_and_test(&req->rq_refcount))
648                 return;
649
650         cfs_spin_lock(&svc->srv_at_lock);
651         if (req->rq_at_linked) {
652                 struct ptlrpc_at_array *array = &svc->srv_at_array;
653                 __u32 index = req->rq_at_index;
654
655                 LASSERT(!cfs_list_empty(&req->rq_timed_list));
656                 cfs_list_del_init(&req->rq_timed_list);
657                 cfs_spin_lock(&req->rq_lock);
658                 req->rq_at_linked = 0;
659                 cfs_spin_unlock(&req->rq_lock);
660                 array->paa_reqs_count[index]--;
661                 array->paa_count--;
662         } else
663                 LASSERT(cfs_list_empty(&req->rq_timed_list));
664         cfs_spin_unlock(&svc->srv_at_lock);
665
666         /* finalize request */
667         if (req->rq_export) {
668                 class_export_put(req->rq_export);
669                 req->rq_export = NULL;
670         }
671
672         cfs_spin_lock(&svc->srv_lock);
673
674         cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
675
676         refcount = --(rqbd->rqbd_refcount);
677         if (refcount == 0) {
678                 /* request buffer is now idle: add to history */
679                 cfs_list_del(&rqbd->rqbd_list);
680                 cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
681                 svc->srv_n_history_rqbds++;
682
683                 /* cull some history?
684                  * I expect only about 1 or 2 rqbds need to be recycled here */
685                 while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) {
686                         rqbd = cfs_list_entry(svc->srv_history_rqbds.next,
687                                               struct ptlrpc_request_buffer_desc,
688                                               rqbd_list);
689
690                         cfs_list_del(&rqbd->rqbd_list);
691                         svc->srv_n_history_rqbds--;
692
693                         /* remove rqbd's reqs from svc's req history while
694                          * I've got the service lock */
695                         cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
696                                 req = cfs_list_entry(tmp, struct ptlrpc_request,
697                                                      rq_list);
698                                 /* Track the highest culled req seq */
699                                 if (req->rq_history_seq >
700                                     svc->srv_request_max_cull_seq)
701                                         svc->srv_request_max_cull_seq =
702                                                 req->rq_history_seq;
703                                 cfs_list_del(&req->rq_history_list);
704                         }
705
706                         cfs_spin_unlock(&svc->srv_lock);
707
708                         cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
709                                 req = cfs_list_entry(rqbd->rqbd_reqs.next,
710                                                      struct ptlrpc_request,
711                                                      rq_list);
712                                 cfs_list_del(&req->rq_list);
713                                 ptlrpc_server_free_request(req);
714                         }
715
716                         cfs_spin_lock(&svc->srv_lock);
717                         /*
718                          * now all reqs including the embedded req have been
719                          * disposed of, schedule the request buffer for re-use.
720                          */
721                         LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
722                                 0);
723                         cfs_list_add_tail(&rqbd->rqbd_list,
724                                           &svc->srv_idle_rqbds);
725                 }
726
727                 cfs_spin_unlock(&svc->srv_lock);
728         } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
729                 /* If we are low on memory, we are not interested in history */
730                 cfs_list_del(&req->rq_list);
731                 cfs_list_del_init(&req->rq_history_list);
732                 cfs_spin_unlock(&svc->srv_lock);
733
734                 ptlrpc_server_free_request(req);
735         } else {
736                 cfs_spin_unlock(&svc->srv_lock);
737         }
738 }
739
740 /**
741  * To finish a request: stop sending more early replies and release
742  * the request. Should be called after we have finished handling the request.
743  */
744 static void ptlrpc_server_finish_request(struct ptlrpc_service *svc,
745                                          struct ptlrpc_request *req)
746 {
747         ptlrpc_hpreq_fini(req);
748
749         cfs_spin_lock(&svc->srv_rq_lock);
750         svc->srv_n_active_reqs--;
751         if (req->rq_hp)
752                 svc->srv_n_active_hpreq--;
753         cfs_spin_unlock(&svc->srv_rq_lock);
754
755         ptlrpc_server_drop_request(req);
756 }
757
758 /**
759  * This function makes sure dead exports are evicted in a timely manner.
760  * It is only called when some export receives a message (i.e.,
761  * the network is up).
762  */
763 static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
764 {
765         struct obd_export *oldest_exp;
766         time_t oldest_time, new_time;
767
768         ENTRY;
769
770         LASSERT(exp);
771
772         /* Compensate for slow machines, etc, by faking our request time
773            into the future.  Although this can break the strict time-ordering
774            of the list, we can be really lazy here - we don't have to evict
775            at the exact right moment.  Eventually, all silent exports
776            will make it to the top of the list. */
777
778         /* Do not pay attention to renewals of 1 sec or less. */
779         new_time = cfs_time_current_sec() + extra_delay;
780         if (exp->exp_last_request_time + 1 /*second */ >= new_time)
781                 RETURN_EXIT;
782
783         exp->exp_last_request_time = new_time;
784         CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
785                exp->exp_client_uuid.uuid,
786                exp->exp_last_request_time, exp);
787
788         /* exports may get disconnected from the chain even though the
789            export has references, so we must keep the spin lock while
790            manipulating the lists */
791         cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
792
793         if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
794                 /* this one is not timed */
795                 cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
796                 RETURN_EXIT;
797         }
798
799         cfs_list_move_tail(&exp->exp_obd_chain_timed,
800                            &exp->exp_obd->obd_exports_timed);
801
802         oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
803                                     struct obd_export, exp_obd_chain_timed);
804         oldest_time = oldest_exp->exp_last_request_time;
805         cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
806
807         if (exp->exp_obd->obd_recovering) {
808                 /* be nice to everyone during recovery */
809                 EXIT;
810                 return;
811         }
812
813         /* Note - racing to start/reset the obd_eviction timer is safe */
814         if (exp->exp_obd->obd_eviction_timer == 0) {
815                 /* Check if the oldest entry is expired. */
816                 if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
817                                               extra_delay)) {
818                         /* We need a second timer, in case the net was down and
819                          * it just came back. Since the pinger may skip every
820                          * other PING_INTERVAL (see note in ptlrpc_pinger_main),
821                          * we better wait for 3. */
822                         exp->exp_obd->obd_eviction_timer =
823                                 cfs_time_current_sec() + 3 * PING_INTERVAL;
824                         CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
825                                exp->exp_obd->obd_name, 
826                                obd_export_nid2str(oldest_exp), oldest_time);
827                 }
828         } else {
829                 if (cfs_time_current_sec() >
830                     (exp->exp_obd->obd_eviction_timer + extra_delay)) {
831                         /* The evictor won't evict anyone who we've heard from
832                          * recently, so we don't have to check before we start
833                          * it. */
834                         if (!ping_evictor_wake(exp))
835                                 exp->exp_obd->obd_eviction_timer = 0;
836                 }
837         }
838
839         EXIT;
840 }
841
842 /**
843  * Sanity check request \a req.
844  * Return 0 if all is ok, error code otherwise.
845  */
846 static int ptlrpc_check_req(struct ptlrpc_request *req)
847 {
848         int rc = 0;
849
850         if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
851                      req->rq_export->exp_conn_cnt)) {
852                 DEBUG_REQ(D_ERROR, req,
853                           "DROPPING req from old connection %d < %d",
854                           lustre_msg_get_conn_cnt(req->rq_reqmsg),
855                           req->rq_export->exp_conn_cnt);
856                 return -EEXIST;
857         }
858         if (unlikely(req->rq_export->exp_obd &&
859                      req->rq_export->exp_obd->obd_fail)) {
860                 /* Failing over, don't handle any more reqs, send
861                  * error response instead. */
862                 CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
863                        req, req->rq_export->exp_obd->obd_name);
864                 rc = -ENODEV;
865         } else if (lustre_msg_get_flags(req->rq_reqmsg) &
866                    (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
867                    !(req->rq_export->exp_obd->obd_recovering)) {
868                         DEBUG_REQ(D_ERROR, req,
869                                   "Invalid replay without recovery");
870                         class_fail_export(req->rq_export);
871                         rc = -ENODEV;
872         } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
873                    !(req->rq_export->exp_obd->obd_recovering)) {
874                         DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
875                                   LPU64" without recovery",
876                                   lustre_msg_get_transno(req->rq_reqmsg));
877                         class_fail_export(req->rq_export);
878                         rc = -ENODEV;
879         }
880
881         if (unlikely(rc < 0)) {
882                 req->rq_status = rc;
883                 ptlrpc_error(req);
884         }
885         return rc;
886 }
887
888 static void ptlrpc_at_set_timer(struct ptlrpc_service *svc)
889 {
890         struct ptlrpc_at_array *array = &svc->srv_at_array;
891         __s32 next;
892
893         cfs_spin_lock(&svc->srv_at_lock);
894         if (array->paa_count == 0) {
895                 cfs_timer_disarm(&svc->srv_at_timer);
896                 cfs_spin_unlock(&svc->srv_at_lock);
897                 return;
898         }
899
900         /* Set timer for closest deadline */
901         next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
902                        at_early_margin);
903         if (next <= 0)
904                 ptlrpc_at_timer((unsigned long)svc);
905         else
906                 cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next));
907         cfs_spin_unlock(&svc->srv_at_lock);
908         CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next);
909 }
910
911 /* Add rpc to early reply check list */
912 static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
913 {
914         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
915         struct ptlrpc_request *rq = NULL;
916         struct ptlrpc_at_array *array = &svc->srv_at_array;
917         __u32 index;
918         int found = 0;
919
920         if (AT_OFF)
921                 return(0);
922
923         if (req->rq_no_reply)
924                 return 0;
925
926         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
927                 return(-ENOSYS);
928
929         cfs_spin_lock(&svc->srv_at_lock);
930         LASSERT(cfs_list_empty(&req->rq_timed_list));
931
932         index = (unsigned long)req->rq_deadline % array->paa_size;
933         if (array->paa_reqs_count[index] > 0) {
934                 /* latest rpcs will have the latest deadlines in the list,
935                  * so search backward. */
936                 cfs_list_for_each_entry_reverse(rq,
937                                                 &array->paa_reqs_array[index],
938                                                 rq_timed_list) {
939                         if (req->rq_deadline >= rq->rq_deadline) {
940                                 cfs_list_add(&req->rq_timed_list,
941                                              &rq->rq_timed_list);
942                                 break;
943                         }
944                 }
945         }
946
947         /* Add the request at the head of the list */
948         if (cfs_list_empty(&req->rq_timed_list))
949                 cfs_list_add(&req->rq_timed_list,
950                              &array->paa_reqs_array[index]);
951
952         cfs_spin_lock(&req->rq_lock);
953         req->rq_at_linked = 1;
954         cfs_spin_unlock(&req->rq_lock);
955         req->rq_at_index = index;
956         array->paa_reqs_count[index]++;
957         array->paa_count++;
958         if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
959                 array->paa_deadline = req->rq_deadline;
960                 found = 1;
961         }
962         cfs_spin_unlock(&svc->srv_at_lock);
963
964         if (found)
965                 ptlrpc_at_set_timer(svc);
966
967         return 0;
968 }
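/*
 * Worked example of the AT slot calculation above (numbers are
 * hypothetical): the at_array is a ring indexed by deadline in seconds,
 * index = rq_deadline % paa_size.  With paa_size == 600 and a request
 * whose rq_deadline is 10000, the request lands in slot 10000 % 600 == 400;
 * within a slot the list is kept sorted so the latest deadlines sit at the
 * tail, which is why ptlrpc_at_add_timed() searches backward.
 */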
969
970 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
971 {
972         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
973         struct ptlrpc_request *reqcopy;
974         struct lustre_msg *reqmsg;
975         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
976         time_t newdl;
977         int rc;
978         ENTRY;
979
980         /* deadline is when the client expects us to reply, margin is the
981            difference between clients' and servers' expectations */
982         DEBUG_REQ(D_ADAPTTO, req,
983                   "%ssending early reply (deadline %+lds, margin %+lds) for "
984                   "%d+%d", AT_OFF ? "AT off - not " : "",
985                   olddl, olddl - at_get(&svc->srv_at_estimate),
986                   at_get(&svc->srv_at_estimate), at_extra);
987
988         if (AT_OFF)
989                 RETURN(0);
990
991         if (olddl < 0) {
992                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
993                           "not sending early reply. Consider increasing "
994                           "at_early_margin (%d)?", olddl, at_early_margin);
995
996                 /* Return an error so we're not re-added to the timed list. */
997                 RETURN(-ETIMEDOUT);
998         }
999
1000         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
1001                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
1002                           "but no AT support");
1003                 RETURN(-ENOSYS);
1004         }
1005
1006         if (req->rq_export &&
1007             lustre_msg_get_flags(req->rq_reqmsg) &
1008             (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
1009                 /* During recovery, we don't want to send too many early
1010                  * replies, but on the other hand we want to make sure the
1011                  * client has enough time to resend if the rpc is lost. So
1012                  * during the recovery period send at least 4 early replies,
1013                  * spacing them every at_extra if we can. at_estimate should
1014                  * always equal this fixed value during recovery. */
1015                 at_measured(&svc->srv_at_estimate, min(at_extra,
1016                             req->rq_export->exp_obd->obd_recovery_timeout / 4));
1017         } else {
1018                 /* Fake our processing time into the future to ask the clients
1019                  * for some extra amount of time */
1020                 at_measured(&svc->srv_at_estimate, at_extra +
1021                             cfs_time_current_sec() -
1022                             req->rq_arrival_time.tv_sec);
1023
1024                 /* Check to see if we've actually increased the deadline -
1025                  * we may be past adaptive_max */
1026                 if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
1027                     at_get(&svc->srv_at_estimate)) {
1028                         DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
1029                                   "(%ld/%ld), not sending early reply\n",
1030                                   olddl, req->rq_arrival_time.tv_sec +
1031                                   at_get(&svc->srv_at_estimate) -
1032                                   cfs_time_current_sec());
1033                         RETURN(-ETIMEDOUT);
1034                 }
1035         }
1036         newdl = cfs_time_current_sec() + at_get(&svc->srv_at_estimate);
1037
1038         OBD_ALLOC(reqcopy, sizeof *reqcopy);
1039         if (reqcopy == NULL)
1040                 RETURN(-ENOMEM);
1041         OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
1042         if (!reqmsg) {
1043                 OBD_FREE(reqcopy, sizeof *reqcopy);
1044                 RETURN(-ENOMEM);
1045         }
1046
1047         *reqcopy = *req;
1048         reqcopy->rq_reply_state = NULL;
1049         reqcopy->rq_rep_swab_mask = 0;
1050         reqcopy->rq_pack_bulk = 0;
1051         reqcopy->rq_pack_udesc = 0;
1052         reqcopy->rq_packed_final = 0;
1053         sptlrpc_svc_ctx_addref(reqcopy);
1054         /* We only need the reqmsg for the magic */
1055         reqcopy->rq_reqmsg = reqmsg;
1056         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
1057
1058         LASSERT(cfs_atomic_read(&req->rq_refcount));
1059         /** if it is the last refcount then an early reply isn't needed */
1060         if (cfs_atomic_read(&req->rq_refcount) == 1) {
1061                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
1062                           "abort sending early reply\n");
1063                 GOTO(out, rc = -EINVAL);
1064         }
1065
1066         /* Connection ref */
1067         reqcopy->rq_export = class_conn2export(
1068                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
1069         if (reqcopy->rq_export == NULL)
1070                 GOTO(out, rc = -ENODEV);
1071
1072         /* RPC ref */
1073         class_export_rpc_get(reqcopy->rq_export);
1074         if (reqcopy->rq_export->exp_obd &&
1075             reqcopy->rq_export->exp_obd->obd_fail)
1076                 GOTO(out_put, rc = -ENODEV);
1077
1078         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1079         if (rc)
1080                 GOTO(out_put, rc);
1081
1082         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1083
1084         if (!rc) {
1085                 /* Adjust our own deadline to what we told the client */
1086                 req->rq_deadline = newdl;
1087                 req->rq_early_count++; /* number sent, server side */
1088         } else {
1089                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1090         }
1091
1092         /* Free the (early) reply state from lustre_pack_reply.
1093            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1094         ptlrpc_req_drop_rs(reqcopy);
1095
1096 out_put:
1097         class_export_rpc_put(reqcopy->rq_export);
1098         class_export_put(reqcopy->rq_export);
1099 out:
1100         sptlrpc_svc_ctx_decref(reqcopy);
1101         OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
1102         OBD_FREE(reqcopy, sizeof *reqcopy);
1103         RETURN(rc);
1104 }
1105
1106 /* Send early replies to everybody expiring within at_early_margin
1107    asking for at_extra time */
1108 static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
1109 {
1110         struct ptlrpc_request *rq, *n;
1111         cfs_list_t work_list;
1112         struct ptlrpc_at_array *array = &svc->srv_at_array;
1113         __u32  index, count;
1114         time_t deadline;
1115         time_t now = cfs_time_current_sec();
1116         cfs_duration_t delay;
1117         int first, counter = 0;
1118         ENTRY;
1119
1120         cfs_spin_lock(&svc->srv_at_lock);
1121         if (svc->srv_at_check == 0) {
1122                 cfs_spin_unlock(&svc->srv_at_lock);
1123                 RETURN(0);
1124         }
1125         delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime);
1126         svc->srv_at_check = 0;
1127
1128         if (array->paa_count == 0) {
1129                 cfs_spin_unlock(&svc->srv_at_lock);
1130                 RETURN(0);
1131         }
1132
1133         /* The timer went off, but maybe the nearest rpc already completed. */
1134         first = array->paa_deadline - now;
1135         if (first > at_early_margin) {
1136                 /* We've still got plenty of time.  Reset the timer. */
1137                 cfs_spin_unlock(&svc->srv_at_lock);
1138                 ptlrpc_at_set_timer(svc);
1139                 RETURN(0);
1140         }
1141
1142         /* We're close to a timeout, and we don't know how much longer the
1143            server will take. Send early replies to everyone expiring soon. */
1144         CFS_INIT_LIST_HEAD(&work_list);
1145         deadline = -1;
1146         index = (unsigned long)array->paa_deadline % array->paa_size;
1147         count = array->paa_count;
1148         while (count > 0) {
1149                 count -= array->paa_reqs_count[index];
1150                 cfs_list_for_each_entry_safe(rq, n,
1151                                              &array->paa_reqs_array[index],
1152                                              rq_timed_list) {
1153                         if (rq->rq_deadline <= now + at_early_margin) {
1154                                 cfs_list_del_init(&rq->rq_timed_list);
1155                                 /**
1156                                  * ptlrpc_server_drop_request() may have already
1157                                  * dropped the refcount to 0. Check this and do
1158                                  * not add the entry to work_list
1159                                  */
1160                                 if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
1161                                         cfs_list_add(&rq->rq_timed_list, &work_list);
1162                                 counter++;
1163                                 array->paa_reqs_count[index]--;
1164                                 array->paa_count--;
1165                                 cfs_spin_lock(&rq->rq_lock);
1166                                 rq->rq_at_linked = 0;
1167                                 cfs_spin_unlock(&rq->rq_lock);
1168                                 continue;
1169                         }
1170
1171                         /* update the earliest deadline */
1172                         if (deadline == -1 || rq->rq_deadline < deadline)
1173                                 deadline = rq->rq_deadline;
1174
1175                         break;
1176                 }
1177
1178                 if (++index >= array->paa_size)
1179                         index = 0;
1180         }
1181         array->paa_deadline = deadline;
1182         cfs_spin_unlock(&svc->srv_at_lock);
1183
1184         /* we have a new earliest deadline, restart the timer */
1185         ptlrpc_at_set_timer(svc);
1186
1187         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1188                "replies\n", first, at_extra, counter);
1189         if (first < 0) {
1190                 /* We're already past request deadlines before we even get a
1191                    chance to send early replies */
1192                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1193                               "request traffic (cpu-bound).\n", svc->srv_name);
1194                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1195                       "delay="CFS_DURATION_T"(jiff)\n",
1196                       counter, svc->srv_n_queued_reqs, svc->srv_n_active_reqs,
1197                       at_get(&svc->srv_at_estimate), delay);
1198         }
1199
1200         /* we took an additional refcount so entries can't be deleted from the
1201          * list; no locking is needed */
1202         while (!cfs_list_empty(&work_list)) {
1203                 rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
1204                                     rq_timed_list);
1205                 cfs_list_del_init(&rq->rq_timed_list);
1206
1207                 if (ptlrpc_at_send_early_reply(rq) == 0)
1208                         ptlrpc_at_add_timed(rq);
1209
1210                 ptlrpc_server_drop_request(rq);
1211         }
1212
1213         RETURN(0);
1214 }
1215
1216 /**
1217  * Put the request on the export list if the request may become
1218  * a high priority one.
1219  */
1220 static int ptlrpc_hpreq_init(struct ptlrpc_service *svc,
1221                              struct ptlrpc_request *req)
1222 {
1223         int rc = 0;
1224         ENTRY;
1225
1226         if (svc->srv_hpreq_handler) {
1227                 rc = svc->srv_hpreq_handler(req);
1228                 if (rc)
1229                         RETURN(rc);
1230         }
1231         if (req->rq_export && req->rq_ops) {
1232                 /* Perform a request-specific check. We should do this check
1233                  * before the request is added to the exp_hp_rpcs list, otherwise
1234                  * it may hit the swab race described in LU-1044. */
1235                 if (req->rq_ops->hpreq_check)
1236                         rc = req->rq_ops->hpreq_check(req);
1237
1238                 cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
1239                 cfs_list_add(&req->rq_exp_list,
1240                              &req->rq_export->exp_hp_rpcs);
1241                 cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1242         }
1243
1244         RETURN(rc);
1245 }
1246
1247 /** Remove the request from the export list. */
1248 static void ptlrpc_hpreq_fini(struct ptlrpc_request *req)
1249 {
1250         ENTRY;
1251         if (req->rq_export && req->rq_ops) {
1252                 /* refresh the lock timeout again so that the client has more
1253                  * room to send a lock cancel RPC. */
1254                 if (req->rq_ops->hpreq_fini)
1255                         req->rq_ops->hpreq_fini(req);
1256
1257                 cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
1258                 cfs_list_del_init(&req->rq_exp_list);
1259                 cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1260         }
1261         EXIT;
1262 }
1263
1264 /**
1265  * Make the request a high priority one.
1266  *
1267  * All the high priority requests are queued in a separate FIFO
1268  * ptlrpc_service::srv_request_hpq list which is parallel to
1269  * ptlrpc_service::srv_request_queue list but has a higher priority
1270  * for handling.
1271  *
1272  * \see ptlrpc_server_handle_request().
1273  */
1274 static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service *svc,
1275                                         struct ptlrpc_request *req)
1276 {
1277         ENTRY;
1278         LASSERT(svc != NULL);
1279         cfs_spin_lock(&req->rq_lock);
1280         if (req->rq_hp == 0) {
1281                 int opc = lustre_msg_get_opc(req->rq_reqmsg);
1282
1283                 /* Add to the high priority queue. */
1284                 cfs_list_move_tail(&req->rq_list, &svc->srv_request_hpq);
1285                 req->rq_hp = 1;
1286                 if (opc != OBD_PING)
1287                         DEBUG_REQ(D_RPCTRACE, req, "high priority req");
1288         }
1289         cfs_spin_unlock(&req->rq_lock);
1290         EXIT;
1291 }
1292
1293 /**
1294  * \see ptlrpc_hpreq_reorder_nolock
1295  */
1296 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
1297 {
1298         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1299         ENTRY;
1300
1301         cfs_spin_lock(&svc->srv_rq_lock);
1302         /* It may happen that the request has already been taken for processing
1303          * but is still on the export list, or that it is not yet in the request
1304          * queue but is already on the export list; in either case do not add it
1305          * to the HP list. */
1306         if (!cfs_list_empty(&req->rq_list))
1307                 ptlrpc_hpreq_reorder_nolock(svc, req);
1308         cfs_spin_unlock(&svc->srv_rq_lock);
1309         EXIT;
1310 }
1311
1312 /** Check if the request is a high priority one. */
1313 static int ptlrpc_server_hpreq_check(struct ptlrpc_service *svc,
1314                                      struct ptlrpc_request *req)
1315 {
1316         ENTRY;
1317
1318         /* Check by request opc. */
1319         if (OBD_PING == lustre_msg_get_opc(req->rq_reqmsg))
1320                 RETURN(1);
1321
1322         RETURN(ptlrpc_hpreq_init(svc, req));
1323 }
1324
1325 /** Check the request's priority and add it to the appropriate service queue. */
1326 static int ptlrpc_server_request_add(struct ptlrpc_service *svc,
1327                                      struct ptlrpc_request *req)
1328 {
1329         int rc;
1330         ENTRY;
1331
1332         rc = ptlrpc_server_hpreq_check(svc, req);
1333         if (rc < 0)
1334                 RETURN(rc);
1335
1336         cfs_spin_lock(&svc->srv_rq_lock);
1337
1338         if (rc)
1339                 ptlrpc_hpreq_reorder_nolock(svc, req);
1340         else
1341                 cfs_list_add_tail(&req->rq_list,
1342                                   &svc->srv_request_queue);
1343
1344         cfs_spin_unlock(&svc->srv_rq_lock);
1345
1346         RETURN(0);
1347 }
1348
1349 /**
1350  * Whether the service is allowed to handle a high priority request.
1351  * The caller may invoke this without any lock, but must hold
1352  * ptlrpc_service::srv_rq_lock to get a reliable result.
1353  */
1354 static int ptlrpc_server_allow_high(struct ptlrpc_service *svc, int force)
1355 {
1356         if (force)
1357                 return 1;
1358
1359         if (svc->srv_n_active_reqs >= svc->srv_threads_running - 1)
1360                 return 0;
1361
1362         return cfs_list_empty(&svc->srv_request_queue) ||
1363                svc->srv_hpreq_count < svc->srv_hpreq_ratio;
1364 }
1365
1366 static int ptlrpc_server_high_pending(struct ptlrpc_service *svc, int force)
1367 {
1368         return ptlrpc_server_allow_high(svc, force) &&
1369                !cfs_list_empty(&svc->srv_request_hpq);
1370 }
1371
1372 /**
1373  * Only allow normal priority requests on a service that has a high-priority
1374  * queue if forced (i.e. cleanup), if there are other high priority requests
1375  * already being processed (i.e. those threads can service more high-priority
1376  * requests), or if there are enough idle threads that a later thread can do
1377  * a high priority request.
1378  * The caller may invoke this without any lock, but must hold
1379  * ptlrpc_service::srv_rq_lock to get a reliable result.
1380  */
1381 static int ptlrpc_server_allow_normal(struct ptlrpc_service *svc, int force)
1382 {
1383 #ifndef __KERNEL__
1384         if (1) /* always allow to handle normal request for liblustre */
1385                 return 1;
1386 #endif
1387         if (force ||
1388             svc->srv_n_active_reqs < svc->srv_threads_running - 2)
1389                 return 1;
1390
1391         if (svc->srv_n_active_reqs >= svc->srv_threads_running - 1)
1392                 return 0;
1393
1394         return svc->srv_n_active_hpreq > 0 || svc->srv_hpreq_handler == NULL;
1395 }
1396
1397 static int ptlrpc_server_normal_pending(struct ptlrpc_service *svc, int force)
1398 {
1399         return ptlrpc_server_allow_normal(svc, force) &&
1400                !cfs_list_empty(&svc->srv_request_queue);
1401 }
1402
1403 /**
1404  * Returns true if there are requests available in the incoming
1405  * request queue for processing and it is allowed to fetch them.
1406  * May be called without any lock, but the caller must hold
1407  * ptlrpc_service::srv_rq_lock to get a reliable result.
1408  * \see ptlrpc_server_allow_normal
1409  * \see ptlrpc_server_allow_high
1410  */
1411 static inline int
1412 ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force)
1413 {
1414         return ptlrpc_server_high_pending(svc, force) ||
1415                ptlrpc_server_normal_pending(svc, force);
1416 }
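
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a reliable check-and-fetch takes srv_rq_lock around both operations,
 * mirroring what ptlrpc_server_handle_request() does below:
 *
 *      cfs_spin_lock(&svc->srv_rq_lock);
 *      if (ptlrpc_server_request_pending(svc, 0))
 *              req = ptlrpc_server_request_get(svc, 0);
 *      cfs_spin_unlock(&svc->srv_rq_lock);
 */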
1417
1418 /**
1419  * Fetch a request for processing from queue of unprocessed requests.
1420  * Favors high-priority requests.
1421  * Returns a pointer to fetched request.
1422  */
1423 static struct ptlrpc_request *
1424 ptlrpc_server_request_get(struct ptlrpc_service *svc, int force)
1425 {
1426         struct ptlrpc_request *req;
1427         ENTRY;
1428
1429         if (ptlrpc_server_high_pending(svc, force)) {
1430                 req = cfs_list_entry(svc->srv_request_hpq.next,
1431                                      struct ptlrpc_request, rq_list);
1432                 svc->srv_hpreq_count++;
1433                 RETURN(req);
1434         }
1436
1437         if (ptlrpc_server_normal_pending(svc, force)) {
1438                 req = cfs_list_entry(svc->srv_request_queue.next,
1439                                      struct ptlrpc_request, rq_list);
1440                 svc->srv_hpreq_count = 0;
1441                 RETURN(req);
1442         }
1443         RETURN(NULL);
1444 }
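
/*
 * Note on srv_hpreq_ratio (added for clarity): srv_hpreq_count is bumped for
 * every HP request taken above and reset whenever a normal request is taken,
 * while ptlrpc_server_allow_high() refuses once the count reaches
 * srv_hpreq_ratio and normal requests are waiting.  So with
 * srv_hpreq_ratio == 2 and both queues busy (and enough threads free), the
 * service takes at most two HP requests, then one normal request, and so on.
 */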
1445
1446 /**
1447  * Handle freshly incoming reqs, add to timed early reply list,
1448  * pass on to regular request queue.
1449  * All incoming requests pass through here before getting into
1450  * ptlrpc_server_handle_request() later on.
1451  */
1452 static int
1453 ptlrpc_server_handle_req_in(struct ptlrpc_service *svc)
1454 {
1455         struct ptlrpc_request *req;
1456         __u32                  deadline;
1457         int                    rc;
1458         ENTRY;
1459
1460         LASSERT(svc);
1461
1462         cfs_spin_lock(&svc->srv_lock);
1463         if (cfs_list_empty(&svc->srv_req_in_queue)) {
1464                 cfs_spin_unlock(&svc->srv_lock);
1465                 RETURN(0);
1466         }
1467
1468         req = cfs_list_entry(svc->srv_req_in_queue.next,
1469                              struct ptlrpc_request, rq_list);
1470         cfs_list_del_init (&req->rq_list);
1471         svc->srv_n_queued_reqs--;
1472         /* Consider this still a "queued" request as far as stats are
1473            concerned */
1474         cfs_spin_unlock(&svc->srv_lock);
1475
1476         /* go through security check/transform */
1477         rc = sptlrpc_svc_unwrap_request(req);
1478         switch (rc) {
1479         case SECSVC_OK:
1480                 break;
1481         case SECSVC_COMPLETE:
1482                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1483                 goto err_req;
1484         case SECSVC_DROP:
1485                 goto err_req;
1486         default:
1487                 LBUG();
1488         }
1489
1490         /*
1491          * For a null-flavored RPC the message has already been unpacked by
1492          * sptlrpc; redoing the unpack would be harmless, just redundant.
1493          */
1494         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1495                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1496                 if (rc != 0) {
1497                         CERROR("error unpacking request: ptl %d from %s "
1498                                "x"LPU64"\n", svc->srv_req_portal,
1499                                libcfs_id2str(req->rq_peer), req->rq_xid);
1500                         goto err_req;
1501                 }
1502         }
1503
1504         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1505         if (rc) {
1506                 CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
1507                         LPU64"\n", svc->srv_req_portal,
1508                         libcfs_id2str(req->rq_peer), req->rq_xid);
1509                 goto err_req;
1510         }
1511
1512         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1513             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
1514                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1515                        cfs_fail_val, req->rq_xid);
1516                 goto err_req;
1517         }
1518
1519         rc = -EINVAL;
1520         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1521                 CERROR("wrong packet type received (type=%u) from %s\n",
1522                        lustre_msg_get_type(req->rq_reqmsg),
1523                        libcfs_id2str(req->rq_peer));
1524                 goto err_req;
1525         }
1526
1527         switch(lustre_msg_get_opc(req->rq_reqmsg)) {
1528         case MDS_WRITEPAGE:
1529         case OST_WRITE:
1530                 req->rq_bulk_write = 1;
1531                 break;
1532         case MDS_READPAGE:
1533         case OST_READ:
1534         case MGS_CONFIG_READ:
1535                 req->rq_bulk_read = 1;
1536                 break;
1537         }
1538
1539         CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
1540
1541         req->rq_export = class_conn2export(
1542                 lustre_msg_get_handle(req->rq_reqmsg));
1543         if (req->rq_export) {
1544                 rc = ptlrpc_check_req(req);
1545                 if (rc == 0) {
1546                         rc = sptlrpc_target_export_check(req->rq_export, req);
1547                         if (rc)
1548                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1549                                           "illegal security flavor,");
1550                 }
1551
1552                 if (rc)
1553                         goto err_req;
1554                 ptlrpc_update_export_timer(req->rq_export, 0);
1555         }
1556
1557         /* req_in handling should/must be fast */
1558         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1559                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1560                           cfs_time_sub(cfs_time_current_sec(),
1561                                        req->rq_arrival_time.tv_sec));
1562
1563         /* Set rpc server deadline and add it to the timed list */
1564         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1565                     MSGHDR_AT_SUPPORT) ?
1566                    /* The max time the client expects us to take */
1567                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1568         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1569         if (unlikely(deadline == 0)) {
1570                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1571                 goto err_req;
1572         }
1573
1574         ptlrpc_at_add_timed(req);
1575
1576         /* Move it over to the request processing queue */
1577         rc = ptlrpc_server_request_add(svc, req);
1578         if (rc) {
1579                 ptlrpc_hpreq_fini(req);
1580                 GOTO(err_req, rc);
1581         }
1582         cfs_waitq_signal(&svc->srv_waitq);
1583         RETURN(1);
1584
1585 err_req:
1586         cfs_spin_lock(&svc->srv_rq_lock);
1587         svc->srv_n_active_reqs++;
1588         cfs_spin_unlock(&svc->srv_rq_lock);
1589         ptlrpc_server_finish_request(svc, req);
1590
1591         RETURN(1);
1592 }
1593
1594 /**
1595  * Main incoming request handling logic.
1596  * Calls handler function from service to do actual processing.
1597  */
1598 static int
1599 ptlrpc_server_handle_request(struct ptlrpc_service *svc,
1600                              struct ptlrpc_thread *thread)
1601 {
1602         struct obd_export     *export = NULL;
1603         struct ptlrpc_request *request;
1604         struct timeval         work_start;
1605         struct timeval         work_end;
1606         long                   timediff;
1607         int                    rc;
1608         int                    fail_opc = 0;
1609         ENTRY;
1610
1611         LASSERT(svc);
1612
1613         cfs_spin_lock(&svc->srv_rq_lock);
1614 #ifndef __KERNEL__
1615         /* !@%$# liblustre only has 1 thread */
1616         if (cfs_atomic_read(&svc->srv_n_difficult_replies) != 0) {
1617                 cfs_spin_unlock(&svc->srv_rq_lock);
1618                 RETURN(0);
1619         }
1620 #endif
1621         request = ptlrpc_server_request_get(svc, 0);
1622         if  (request == NULL) {
1623                 cfs_spin_unlock(&svc->srv_rq_lock);
1624                 RETURN(0);
1625         }
1626
1627         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
1628                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
1629         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
1630                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
1631
1632         if (unlikely(fail_opc)) {
1633                 if (request->rq_export && request->rq_ops) {
1634                         cfs_spin_unlock(&svc->srv_rq_lock);
1635                         OBD_FAIL_TIMEOUT(fail_opc, 4);
1636                         cfs_spin_lock(&svc->srv_rq_lock);
1637                         request = ptlrpc_server_request_get(svc, 0);
1638                         if  (request == NULL) {
1639                                 cfs_spin_unlock(&svc->srv_rq_lock);
1640                                 RETURN(0);
1641                         }
1642                 }
1643         }
1644
1645         cfs_list_del_init(&request->rq_list);
1646         svc->srv_n_active_reqs++;
1647         if (request->rq_hp)
1648                 svc->srv_n_active_hpreq++;
1649
1650         cfs_spin_unlock(&svc->srv_rq_lock);
1651
1652         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
1653
1654         if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
1655                 libcfs_debug_dumplog();
1656
1657         cfs_gettimeofday(&work_start);
1658         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
1659         if (likely(svc->srv_stats != NULL)) {
1660                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
1661                                     timediff);
1662                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
1663                                     svc->srv_n_queued_reqs);
1664                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
1665                                     svc->srv_n_active_reqs);
1666                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
1667                                     at_get(&svc->srv_at_estimate));
1668         }
1669
1670         rc = lu_context_init(&request->rq_session,
1671                              LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
1672         if (rc) {
1673                 CERROR("Failure to initialize session: %d\n", rc);
1674                 goto out_req;
1675         }
1676         request->rq_session.lc_thread = thread;
1677         request->rq_session.lc_cookie = 0x5;
1678         lu_context_enter(&request->rq_session);
1679
1680         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
1681
1682         request->rq_svc_thread = thread;
1683         if (thread)
1684                 request->rq_svc_thread->t_env->le_ses = &request->rq_session;
1685
1686         if (likely(request->rq_export)) {
1687                 if (unlikely(ptlrpc_check_req(request)))
1688                         goto put_conn;
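                /* timediff is in microseconds; shifting right by 19 divides
                 * by 2^19 (~524k), i.e. the queue wait time expressed in
                 * roughly half-second units. */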
1689                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
1690                 export = class_export_rpc_get(request->rq_export);
1691         }
1692
1693         /* Discard requests queued for longer than the deadline.
1694            The deadline is increased if we send an early reply. */
1695         if (cfs_time_current_sec() > request->rq_deadline) {
1696                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
1697                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
1698                           libcfs_id2str(request->rq_peer),
1699                           cfs_time_sub(request->rq_deadline,
1700                           request->rq_arrival_time.tv_sec),
1701                           cfs_time_sub(cfs_time_current_sec(),
1702                           request->rq_deadline));
1703                 goto put_rpc_export;
1704         }
1705
1706         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
1707                "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1708                (request->rq_export ?
1709                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1710                (request->rq_export ?
1711                 cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
1712                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
1713                libcfs_id2str(request->rq_peer),
1714                lustre_msg_get_opc(request->rq_reqmsg));
1715
1716         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
1717                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
1718
1719         rc = svc->srv_handler(request);
1720
1721         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
1722
1723 put_rpc_export:
1724         if (export != NULL)
1725                 class_export_rpc_put(export);
1726 put_conn:
1727         lu_context_exit(&request->rq_session);
1728         lu_context_fini(&request->rq_session);
1729
1730         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
1731                 DEBUG_REQ(D_WARNING, request, "Request x"LPU64" took longer "
1732                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
1733                           " client may timeout.",
1734                           request->rq_xid, cfs_time_sub(request->rq_deadline,
1735                           request->rq_arrival_time.tv_sec),
1736                           cfs_time_sub(cfs_time_current_sec(),
1737                           request->rq_deadline));
1738         }
1739
1740         cfs_gettimeofday(&work_end);
1741         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1742         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
1743                "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
1744                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
1745                 cfs_curproc_comm(),
1746                 (request->rq_export ?
1747                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1748                 (request->rq_export ?
1749                  cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
1750                 lustre_msg_get_status(request->rq_reqmsg),
1751                 request->rq_xid,
1752                 libcfs_id2str(request->rq_peer),
1753                 lustre_msg_get_opc(request->rq_reqmsg),
1754                 timediff,
1755                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
1756                 (request->rq_repmsg ?
1757                  lustre_msg_get_transno(request->rq_repmsg) :
1758                  request->rq_transno),
1759                 request->rq_status,
1760                 (request->rq_repmsg ?
1761                  lustre_msg_get_status(request->rq_repmsg) : -999));
1762         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
1763                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
1764                 int opc = opcode_offset(op);
1765                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
1766                         LASSERT(opc < LUSTRE_MAX_OPCODES);
1767                         lprocfs_counter_add(svc->srv_stats,
1768                                             opc + EXTRA_MAX_OPCODES,
1769                                             timediff);
1770                 }
1771         }
1772         if (unlikely(request->rq_early_count)) {
1773                 DEBUG_REQ(D_ADAPTTO, request,
1774                           "sent %d early replies before finishing in "
1775                           CFS_DURATION_T"s",
1776                           request->rq_early_count,
1777                           cfs_time_sub(work_end.tv_sec,
1778                           request->rq_arrival_time.tv_sec));
1779         }
1780
1781 out_req:
1782         ptlrpc_server_finish_request(svc, request);
1783
1784         RETURN(1);
1785 }
1786
1787 /**
1788  * An internal function to process a single reply state object.
1789  */
1790 static int
1791 ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
1792 {
1793         struct ptlrpc_service     *svc = rs->rs_service;
1794         struct obd_export         *exp;
1795         int                        nlocks;
1796         int                        been_handled;
1797         ENTRY;
1798
1799         exp = rs->rs_export;
1800
1801         LASSERT (rs->rs_difficult);
1802         LASSERT (rs->rs_scheduled);
1803         LASSERT (cfs_list_empty(&rs->rs_list));
1804
1805         cfs_spin_lock (&exp->exp_lock);
1806         /* Noop if removed already */
1807         cfs_list_del_init (&rs->rs_exp_list);
1808         cfs_spin_unlock (&exp->exp_lock);
1809
1810         /* The disk commit callback holds exp_uncommitted_replies_lock while it
1811          * iterates over newly committed replies, removing them from
1812          * exp_uncommitted_replies.  It then drops this lock and schedules the
1813          * replies it found for handling here.
1814          *
1815          * We can avoid contention for exp_uncommitted_replies_lock between the
1816          * HRT threads and further commit callbacks by checking rs_committed
1817          * which is set in the commit callback while it holds both
1818          * rs_lock and exp_uncommitted_replies_lock.
1819          *
1820          * If we see rs_committed clear, the commit callback _may_ not have
1821          * handled this reply yet and we race with it to grab
1822          * exp_uncommitted_replies_lock before removing the reply from
1823          * exp_uncommitted_replies.  Note that if we lose the race and the
1824          * reply has already been removed, list_del_init() is a noop.
1825          *
1826          * If we see rs_committed set, we know the commit callback is handling,
1827          * or has handled this reply since store reordering might allow us to
1828          * see rs_committed set out of sequence.  But since this is done
1829          * holding rs_lock, we can be sure it has all completed once we hold
1830          * rs_lock, which we do right next.
1831          */
1832         if (!rs->rs_committed) {
1833                 cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
1834                 cfs_list_del_init(&rs->rs_obd_list);
1835                 cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
1836         }
1837
1838         cfs_spin_lock(&rs->rs_lock);
1839
1840         been_handled = rs->rs_handled;
1841         rs->rs_handled = 1;
1842
1843         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
1844         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
1845
1846         if (nlocks == 0 && !been_handled) {
1847                 /* If we see this, we should already have seen the warning
1848                  * in mds_steal_ack_locks()  */
1849                 CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
1850                       " o%d NID %s\n",
1851                       rs,
1852                       rs->rs_xid, rs->rs_transno, rs->rs_opc,
1853                       libcfs_nid2str(exp->exp_connection->c_peer.nid));
1854         }
1855
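        /* Drop rs_lock while unlinking the reply MD and releasing ack locks;
         * both can take other locks and neither needs rs_lock protection,
         * since the lock handles were already "stolen" above. */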
1856         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
1857                 cfs_spin_unlock(&rs->rs_lock);
1858
1859                 if (!been_handled && rs->rs_on_net) {
1860                         LNetMDUnlink(rs->rs_md_h);
1861                         /* Ignore return code; we're racing with
1862                          * completion... */
1863                 }
1864
1865                 while (nlocks-- > 0)
1866                         ldlm_lock_decref(&rs->rs_locks[nlocks],
1867                                          rs->rs_modes[nlocks]);
1868
1869                 cfs_spin_lock(&rs->rs_lock);
1870         }
1871
1872         rs->rs_scheduled = 0;
1873
1874         if (!rs->rs_on_net) {
1875                 /* Off the net */
1876                 cfs_spin_unlock(&rs->rs_lock);
1877
1878                 class_export_put (exp);
1879                 rs->rs_export = NULL;
1880                 ptlrpc_rs_decref (rs);
1881                 if (cfs_atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
1882                     svc->srv_is_stopping)
1883                         cfs_waitq_broadcast(&svc->srv_waitq);
1884                 RETURN(1);
1885         }
1886
1887         /* still on the net; callback will schedule */
1888         cfs_spin_unlock(&rs->rs_lock);
1889         RETURN(1);
1890 }
1891
1892 #ifndef __KERNEL__
1893
1894 /**
1895  * Check whether given service has a reply available for processing
1896  * and process it.
1897  *
1898  * \param svc a ptlrpc service
1899  * \retval 0 no replies processed
1900  * \retval 1 one reply processed
1901  */
1902 static int
1903 ptlrpc_server_handle_reply(struct ptlrpc_service *svc)
1904 {
1905         struct ptlrpc_reply_state *rs = NULL;
1906         ENTRY;
1907
1908         cfs_spin_lock(&svc->srv_rs_lock);
1909         if (!cfs_list_empty(&svc->srv_reply_queue)) {
1910                 rs = cfs_list_entry(svc->srv_reply_queue.prev,
1911                                     struct ptlrpc_reply_state,
1912                                     rs_list);
1913                 cfs_list_del_init(&rs->rs_list);
1914         }
1915         cfs_spin_unlock(&svc->srv_rs_lock);
1916         if (rs != NULL)
1917                 ptlrpc_handle_rs(rs);
1918         RETURN(rs != NULL);
1919 }
1920
1921 /* FIXME make use of timeout later */
1922 int
1923 liblustre_check_services (void *arg)
1924 {
1925         int  did_something = 0;
1926         int  rc;
1927         cfs_list_t *tmp, *nxt;
1928         ENTRY;
1929
1930         /* I'm relying on being single threaded, not to have to lock
1931          * ptlrpc_all_services etc */
1932         cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
1933                 struct ptlrpc_service *svc =
1934                         cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
1935
1936                 if (svc->srv_threads_running != 0)     /* I've recursed */
1937                         continue;
1938
1939                 /* service threads can block for bulk, so this limits us
1940                  * (arbitrarily) to recursing 1 stack frame per service.
1941                  * Note that the problem with recursion is that we have to
1942                  * unwind completely before our caller can resume. */
1943
1944                 svc->srv_threads_running++;
1945
1946                 do {
1947                         rc = ptlrpc_server_handle_req_in(svc);
1948                         rc |= ptlrpc_server_handle_reply(svc);
1949                         rc |= ptlrpc_at_check_timed(svc);
1950                         rc |= ptlrpc_server_handle_request(svc, NULL);
1951                         rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
1952                         did_something |= rc;
1953                 } while (rc);
1954
1955                 svc->srv_threads_running--;
1956         }
1957
1958         RETURN(did_something);
1959 }
1960 #define ptlrpc_stop_all_threads(s) do {} while (0)
1961
1962 #else /* __KERNEL__ */
1963
1964 static void
1965 ptlrpc_check_rqbd_pool(struct ptlrpc_service *svc)
1966 {
1967         int avail = svc->srv_nrqbd_receiving;
1968         int low_water = test_req_buffer_pressure ? 0 :
1969                         svc->srv_nbuf_per_group / 2;
1970
1971         /* NB I'm not locking; just looking. */
1972
1973         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
1974          * allowed the request history to grow out of control.  We could put a
1975          * sanity check on that here and cull some history if we need the
1976          * space. */
1977
1978         if (avail <= low_water)
1979                 ptlrpc_grow_req_bufs(svc);
1980
1981         if (svc->srv_stats)
1982                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQBUF_AVAIL_CNTR,
1983                                     avail);
1984 }
1985
1986 static int
1987 ptlrpc_retry_rqbds(void *arg)
1988 {
1989         struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;
1990
1991         svc->srv_rqbd_timeout = 0;
1992         return (-ETIMEDOUT);
1993 }
1994
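/**
 * Check whether there are enough running threads for the currently active
 * requests; one thread is kept in reserve, plus one more when the service
 * has a high priority request handler.
 */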
1995 static inline int
1996 ptlrpc_threads_enough(struct ptlrpc_service *svc)
1997 {
1998         return svc->srv_n_active_reqs <
1999                svc->srv_threads_running - 1 - (svc->srv_hpreq_handler != NULL);
2000 }
2001
2002 /**
2003  * Check whether we are allowed to create more threads.
2004  * May be called without any lock, but the caller must hold
2005  * ptlrpc_service::srv_lock to get a reliable result.
2006  */
2007 static inline int
2008 ptlrpc_threads_increasable(struct ptlrpc_service *svc)
2009 {
2010         return svc->srv_threads_running +
2011                svc->srv_threads_starting < svc->srv_threads_max;
2012 }
2013
2014 /**
2015  * Check whether there are too many requests and more threads may be created.
2016  */
2017 static inline int
2018 ptlrpc_threads_need_create(struct ptlrpc_service *svc)
2019 {
2020         return !ptlrpc_threads_enough(svc) && ptlrpc_threads_increasable(svc);
2021 }
2022
2023 static inline int
2024 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2025 {
2026         return thread_is_stopping(thread) ||
2027                thread->t_svc->srv_is_stopping;
2028 }
2029
2030 static inline int
2031 ptlrpc_rqbd_pending(struct ptlrpc_service *svc)
2032 {
2033         return !cfs_list_empty(&svc->srv_idle_rqbds) &&
2034                svc->srv_rqbd_timeout == 0;
2035 }
2036
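/**
 * srv_at_check is set when the adaptive timeout (AT) early reply list is due
 * to be scanned; see ptlrpc_at_check_timed().
 */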
2037 static inline int
2038 ptlrpc_at_check(struct ptlrpc_service *svc)
2039 {
2040         return svc->srv_at_check;
2041 }
2042
2043 /**
2044  * Check whether requests are waiting to be preprocessed.
2045  * May be called without any lock, but the caller must hold
2046  * ptlrpc_service::srv_lock to get a reliable result.
2047  */
2048 static inline int
2049 ptlrpc_server_request_waiting(struct ptlrpc_service *svc)
2050 {
2051         return !cfs_list_empty(&svc->srv_req_in_queue);
2052 }
2053
2054 static __attribute__((__noinline__)) int
2055 ptlrpc_wait_event(struct ptlrpc_service *svc,
2056                   struct ptlrpc_thread *thread)
2057 {
2058         /* Don't exit while there are replies to be handled */
2059         struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
2060                                              ptlrpc_retry_rqbds, svc);
2061
2062         lc_watchdog_disable(thread->t_watchdog);
2063
2064         cfs_cond_resched();
2065
2066         l_wait_event_exclusive_head(svc->srv_waitq,
2067                                ptlrpc_thread_stopping(thread) ||
2068                                ptlrpc_server_request_waiting(svc) ||
2069                                ptlrpc_server_request_pending(svc, 0) ||
2070                                ptlrpc_rqbd_pending(svc) ||
2071                                ptlrpc_at_check(svc), &lwi);
2072
2073         if (ptlrpc_thread_stopping(thread))
2074                 return -EINTR;
2075
2076         lc_watchdog_touch(thread->t_watchdog, CFS_GET_TIMEOUT(svc));
2077
2078         return 0;
2079 }
2080
2081 /**
2082  * Main thread body for service threads.
2083  * Loops waiting for new requests to process.
2084  * Every time an incoming request is added to its queue, the waitq
2085  * is woken up and one of the threads will handle it.
2086  */
2087 static int ptlrpc_main(void *arg)
2088 {
2089         struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
2090         struct ptlrpc_service  *svc = data->svc;
2091         struct ptlrpc_thread   *thread = data->thread;
2092         struct ptlrpc_reply_state *rs;
2093 #ifdef WITH_GROUP_INFO
2094         cfs_group_info_t *ginfo = NULL;
2095 #endif
2096         struct lu_env *env;
2097         int counter = 0, rc = 0;
2098         ENTRY;
2099
2100         thread->t_pid = cfs_curproc_pid();
2101         cfs_daemonize_ctxt(data->name);
2102
2103 #if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA)
2104         /* we need to do this before any per-thread allocation is done so that
2105          * we get the per-thread allocations on local node.  bug 7342 */
2106         if (svc->srv_cpu_affinity) {
2107                 int cpu, num_cpu;
2108
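                /* Walk the online CPUs and pick the Nth one, where
                 * N = thread id modulo the number of online CPUs, then bind
                 * this thread to that CPU's NUMA node. */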
2109                 for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus();
2110                      cpu++) {
2111                         if (!cpu_online(cpu))
2112                                 continue;
2113                         if (num_cpu == thread->t_id % cfs_num_online_cpus())
2114                                 break;
2115                         num_cpu++;
2116                 }
2117                 cfs_set_cpus_allowed(cfs_current(),
2118                                      node_to_cpumask(cpu_to_node(cpu)));
2119         }
2120 #endif
2121
2122 #ifdef WITH_GROUP_INFO
2123         ginfo = cfs_groups_alloc(0);
2124         if (!ginfo) {
2125                 rc = -ENOMEM;
2126                 goto out;
2127         }
2128
2129         cfs_set_current_groups(ginfo);
2130         cfs_put_group_info(ginfo);
2131 #endif
2132
2133         if (svc->srv_init != NULL) {
2134                 rc = svc->srv_init(thread);
2135                 if (rc)
2136                         goto out;
2137         }
2138
2139         OBD_ALLOC_PTR(env);
2140         if (env == NULL) {
2141                 rc = -ENOMEM;
2142                 goto out_srv_fini;
2143         }
2144
2145         rc = lu_context_init(&env->le_ctx,
2146                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2147         if (rc)
2148                 goto out_srv_fini;
2149
2150         thread->t_env = env;
2151         env->le_ctx.lc_thread = thread;
2152         env->le_ctx.lc_cookie = 0x6;
2153
2154         /* Alloc reply state structure for this one */
2155         OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
2156         if (!rs) {
2157                 rc = -ENOMEM;
2158                 goto out_srv_fini;
2159         }
2160
2161         cfs_spin_lock(&svc->srv_lock);
2162
2163         LASSERT(thread_is_starting(thread));
2164         thread_clear_flags(thread, SVC_STARTING);
2165         svc->srv_threads_starting--;
2166
2167         /* SVC_STOPPING may already be set here if someone else is trying
2168          * to stop the service while this new thread has been dynamically
2169          * forked. We still set SVC_RUNNING to let our creator know that
2170          * we are now running, however we will exit as soon as possible */
2171         thread_add_flags(thread, SVC_RUNNING);
2172         svc->srv_threads_running++;
2173         cfs_spin_unlock(&svc->srv_lock);
2174
2175         /*
2176          * wake up our creator. Note: @data is invalid after this point,
2177          * because it's allocated on ptlrpc_start_thread() stack.
2178          */
2179         cfs_waitq_signal(&thread->t_ctl_waitq);
2180
2181         thread->t_watchdog = lc_watchdog_add(CFS_GET_TIMEOUT(svc), NULL, NULL);
2182
2183         cfs_spin_lock(&svc->srv_rs_lock);
2184         cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
2185         cfs_waitq_signal(&svc->srv_free_rs_waitq);
2186         cfs_spin_unlock(&svc->srv_rs_lock);
2187
2188         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2189                svc->srv_threads_running);
2190
2191         /* XXX maintain a list of all managed devices: insert here */
2192         while (!ptlrpc_thread_stopping(thread)) {
2193                 if (ptlrpc_wait_event(svc, thread))
2194                         break;
2195
2196                 ptlrpc_check_rqbd_pool(svc);
2197
2198                 if (ptlrpc_threads_need_create(svc)) {
2199                         /* Ignore return code - we tried... */
2200                         ptlrpc_start_thread(svc);
2201                 }
2202
2203                 /* Process all incoming reqs before handling any */
2204                 if (ptlrpc_server_request_waiting(svc)) {
2205                         ptlrpc_server_handle_req_in(svc);
2206                         /* but limit ourselves in case of flood */
2207                         if (counter++ < 100)
2208                                 continue;
2209                         counter = 0;
2210                 }
2211
2212                 if (ptlrpc_at_check(svc))
2213                         ptlrpc_at_check_timed(svc);
2214
2215                 if (ptlrpc_server_request_pending(svc, 0)) {
2216                         lu_context_enter(&env->le_ctx);
2217                         ptlrpc_server_handle_request(svc, thread);
2218                         lu_context_exit(&env->le_ctx);
2219                 }
2220
2221                 if (ptlrpc_rqbd_pending(svc) &&
2222                     ptlrpc_server_post_idle_rqbds(svc) < 0) {
2223                         /* I just failed to repost request buffers.
2224                          * Wait for a timeout (unless something else
2225                          * happens) before I try again */
2226                         svc->srv_rqbd_timeout = cfs_time_seconds(1)/10;
2227                         CDEBUG(D_RPCTRACE,"Posted buffers: %d\n",
2228                                svc->srv_nrqbd_receiving);
2229                 }
2230         }
2231
2232         lc_watchdog_delete(thread->t_watchdog);
2233         thread->t_watchdog = NULL;
2234
2235 out_srv_fini:
2236         /*
2237          * deconstruct service specific state created by ptlrpc_start_thread()
2238          */
2239         if (svc->srv_done != NULL)
2240                 svc->srv_done(thread);
2241
2242         if (env != NULL) {
2243                 lu_context_fini(&env->le_ctx);
2244                 OBD_FREE_PTR(env);
2245         }
2246 out:
2247         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2248                thread, thread->t_pid, thread->t_id, rc);
2249
2250         cfs_spin_lock(&svc->srv_lock);
2251         if (thread_test_and_clear_flags(thread, SVC_STARTING))
2252                 svc->srv_threads_starting--;
2253
2254         if (thread_test_and_clear_flags(thread, SVC_RUNNING))
2255                 /* must know immediately */
2256                 svc->srv_threads_running--;
2257
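        /* Publish the exit code in t_id so that ptlrpc_start_thread() can
         * return it when the thread stopped before ever running. */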
2258         thread->t_id    = rc;
2259         thread_add_flags(thread, SVC_STOPPED);
2260
2261         cfs_waitq_signal(&thread->t_ctl_waitq);
2262         cfs_spin_unlock(&svc->srv_lock);
2263
2264         return rc;
2265 }
2266
2267 struct ptlrpc_hr_args {
2268         int                       thread_index;
2269         int                       cpu_index;
2270         struct ptlrpc_hr_service *hrs;
2271 };
2272
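/**
 * Wait condition for the reply handling threads: atomically drain the
 * thread's queue onto \a replies and return true when there is work to do
 * or the thread has been asked to stop.
 */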
2273 static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
2274                           cfs_list_t *replies)
2275 {
2276         int result;
2277
2278         cfs_spin_lock(&t->hrt_lock);
2279         cfs_list_splice_init(&t->hrt_queue, replies);
2280         result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) ||
2281                 !cfs_list_empty(replies);
2282         cfs_spin_unlock(&t->hrt_lock);
2283         return result;
2284 }
2285
2286 /**
2287  * Main body of the reply handling (HR) threads.
2288  * Processes acked reply states.
2289  */
2290 static int ptlrpc_hr_main(void *arg)
2291 {
2292         struct ptlrpc_hr_args * hr_args = arg;
2293         struct ptlrpc_hr_service *hr = hr_args->hrs;
2294         struct ptlrpc_hr_thread *t = &hr->hr_threads[hr_args->thread_index];
2295         char threadname[20];
2296         CFS_LIST_HEAD(replies);
2297
2298         snprintf(threadname, sizeof(threadname),
2299                  "ptlrpc_hr_%d", hr_args->thread_index);
2300
2301         cfs_daemonize_ctxt(threadname);
2302 #if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
2303         cfs_set_cpus_allowed(cfs_current(),
2304                              node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
2305 #endif
2306         cfs_set_bit(HRT_RUNNING, &t->hrt_flags);
2307         cfs_waitq_signal(&t->hrt_wait);
2308
2309         while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
2310
2311                 l_wait_condition(t->hrt_wait, hrt_dont_sleep(t, &replies));
2312                 while (!cfs_list_empty(&replies)) {
2313                         struct ptlrpc_reply_state *rs;
2314
2315                         rs = cfs_list_entry(replies.prev,
2316                                             struct ptlrpc_reply_state,
2317                                             rs_list);
2318                         cfs_list_del_init(&rs->rs_list);
2319                         ptlrpc_handle_rs(rs);
2320                 }
2321         }
2322
2323         cfs_clear_bit(HRT_RUNNING, &t->hrt_flags);
2324         cfs_complete(&t->hrt_completion);
2325
2326         return 0;
2327 }
2328
2329 static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu)
2330 {
2331         struct ptlrpc_hr_thread *t = &hr->hr_threads[n];
2332         struct ptlrpc_hr_args args;
2333         int rc;
2334         ENTRY;
2335
2336         args.thread_index = n;
2337         args.cpu_index = cpu;
2338         args.hrs = hr;
2339
2340         rc = cfs_create_thread(ptlrpc_hr_main, (void*)&args, CFS_DAEMON_FLAGS);
2341         if (rc < 0) {
2342                 cfs_complete(&t->hrt_completion);
2343                 GOTO(out, rc);
2344         }
2345         l_wait_condition(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
2346         RETURN(0);
2347  out:
2348         return rc;
2349 }
2350
2351 static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t)
2352 {
2353         ENTRY;
2354
2355         cfs_set_bit(HRT_STOPPING, &t->hrt_flags);
2356         cfs_waitq_signal(&t->hrt_wait);
2357         cfs_wait_for_completion(&t->hrt_completion);
2358
2359         EXIT;
2360 }
2361
2362 static void ptlrpc_stop_hr_threads(struct ptlrpc_hr_service *hrs)
2363 {
2364         int n;
2365         ENTRY;
2366
2367         for (n = 0; n < hrs->hr_n_threads; n++)
2368                 ptlrpc_stop_hr_thread(&hrs->hr_threads[n]);
2369
2370         EXIT;
2371 }
2372
2373 static int ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr)
2374 {
2375         int rc = -ENOMEM;
2376         int n, cpu, threads_started = 0;
2377         ENTRY;
2378
2379         LASSERT(hr != NULL);
2380         LASSERT(hr->hr_n_threads > 0);
2381
2382         for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
2383 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
2384                 while (!cpu_online(cpu)) {
2385                         cpu++;
2386                         if (cpu >= cfs_num_possible_cpus())
2387                                 cpu = 0;
2388                 }
2389 #endif
2390                 rc = ptlrpc_start_hr_thread(hr, n, cpu);
2391                 if (rc != 0)
2392                         break;
2393                 threads_started++;
2394                 cpu++;
2395         }
2396         if (threads_started == 0) {
2397                 CERROR("No reply handling threads started\n");
2398                 RETURN(-ESRCH);
2399         }
2400         if (threads_started < hr->hr_n_threads) {
2401                 CWARN("Started only %d reply handling threads from %d\n",
2402                       threads_started, hr->hr_n_threads);
2403                 hr->hr_n_threads = threads_started;
2404         }
2405         RETURN(0);
2406 }
2407
2408 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
2409                                struct ptlrpc_thread *thread)
2410 {
2411         struct l_wait_info lwi = { 0 };
2412         ENTRY;
2413
2414         CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
2415                thread, thread->t_pid);
2416
2417         cfs_spin_lock(&svc->srv_lock);
2418         /* let the thread know that we would like it to stop asap */
2419         thread_add_flags(thread, SVC_STOPPING);
2420         cfs_spin_unlock(&svc->srv_lock);
2421
2422         cfs_waitq_broadcast(&svc->srv_waitq);
2423         l_wait_event(thread->t_ctl_waitq,
2424                      thread_is_stopped(thread), &lwi);
2425
2426         cfs_spin_lock(&svc->srv_lock);
2427         cfs_list_del(&thread->t_link);
2428         cfs_spin_unlock(&svc->srv_lock);
2429
2430         OBD_FREE_PTR(thread);
2431         EXIT;
2432 }
2433
2434 /**
2435  * Stops all threads of a particular service \a svc
2436  */
2437 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2438 {
2439         struct ptlrpc_thread *thread;
2440         ENTRY;
2441
2442         cfs_spin_lock(&svc->srv_lock);
2443         while (!cfs_list_empty(&svc->srv_threads)) {
2444                 thread = cfs_list_entry(svc->srv_threads.next,
2445                                         struct ptlrpc_thread, t_link);
2446
2447                 cfs_spin_unlock(&svc->srv_lock);
2448                 ptlrpc_stop_thread(svc, thread);
2449                 cfs_spin_lock(&svc->srv_lock);
2450         }
2451
2452         cfs_spin_unlock(&svc->srv_lock);
2453         EXIT;
2454 }
2455
2456 int ptlrpc_start_threads(struct ptlrpc_service *svc)
2457 {
2458         int i, rc = 0;
2459         ENTRY;
2460
2461         /* We require 2 threads min - see note in
2462            ptlrpc_server_handle_request */
2463         LASSERT(svc->srv_threads_min >= 2);
2464         for (i = 0; i < svc->srv_threads_min; i++) {
2465                 rc = ptlrpc_start_thread(svc);
2466                 /* We have enough threads, don't start more.  b=15759 */
2467                 if (rc == -EMFILE) {
2468                         rc = 0;
2469                         break;
2470                 }
2471                 if (rc) {
2472                         CERROR("cannot start %s thread #%d: rc %d\n",
2473                                svc->srv_thread_name, i, rc);
2474                         ptlrpc_stop_all_threads(svc);
2475                         break;
2476                 }
2477         }
2478         RETURN(rc);
2479 }
2480
2481 int ptlrpc_start_thread(struct ptlrpc_service *svc)
2482 {
2483         struct l_wait_info lwi = { 0 };
2484         struct ptlrpc_svc_data d;
2485         struct ptlrpc_thread *thread;
2486         char name[32];
2487         int rc;
2488         ENTRY;
2489
2490         CDEBUG(D_RPCTRACE, "%s started %d min %d max %d running %d\n",
2491                svc->srv_name, svc->srv_threads_running, svc->srv_threads_min,
2492                svc->srv_threads_max, svc->srv_threads_running);
2493
2494         if (unlikely(svc->srv_is_stopping))
2495                 RETURN(-ESRCH);
2496
2497         if (!ptlrpc_threads_increasable(svc) ||
2498             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2499              svc->srv_threads_running == svc->srv_threads_min - 1))
2500                 RETURN(-EMFILE);
2501
2502         OBD_ALLOC_PTR(thread);
2503         if (thread == NULL)
2504                 RETURN(-ENOMEM);
2505         cfs_waitq_init(&thread->t_ctl_waitq);
2506
2507         cfs_spin_lock(&svc->srv_lock);
2508         if (!ptlrpc_threads_increasable(svc)) {
2509                 cfs_spin_unlock(&svc->srv_lock);
2510                 OBD_FREE_PTR(thread);
2511                 RETURN(-EMFILE);
2512         }
2513
2514         svc->srv_threads_starting++;
2515         thread->t_id    = svc->srv_threads_next_id++;
2516         thread_add_flags(thread, SVC_STARTING);
2517         thread->t_svc   = svc;
2518
2519         cfs_list_add(&thread->t_link, &svc->srv_threads);
2520         cfs_spin_unlock(&svc->srv_lock);
2521
2522         sprintf(name, "%s_%02d", svc->srv_thread_name, thread->t_id);
2523         d.svc = svc;
2524         d.name = name;
2525         d.thread = thread;
2526
2527         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", name);
2528
2529         /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
2530          * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
2531          */
2532         rc = cfs_create_thread(ptlrpc_main, &d, CFS_DAEMON_FLAGS);
2533         if (rc < 0) {
2534                 CERROR("cannot start thread '%s': rc %d\n", name, rc);
2535
2536                 cfs_spin_lock(&svc->srv_lock);
2537                 cfs_list_del(&thread->t_link);
2538                 --svc->srv_threads_starting;
2539                 cfs_spin_unlock(&svc->srv_lock);
2540
2541                 OBD_FREE(thread, sizeof(*thread));
2542                 RETURN(rc);
2543         }
2544         l_wait_event(thread->t_ctl_waitq,
2545                      thread_is_running(thread) || thread_is_stopped(thread),
2546                      &lwi);
2547
2548         rc = thread_is_stopped(thread) ? thread->t_id : 0;
2549         RETURN(rc);
2550 }
2551
2552
2553 int ptlrpc_hr_init(void)
2554 {
2555         int i;
2556         int n_cpus = cfs_num_online_cpus();
2557         struct ptlrpc_hr_service *hr;
2558         int size;
2559         int rc;
2560         ENTRY;
2561
2562         LASSERT(ptlrpc_hr == NULL);
2563
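        /* One reply handling (HR) thread per online CPU; the per-thread
         * state is allocated inline at the tail of ptlrpc_hr_service. */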
2564         size = offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]);
2565         OBD_ALLOC(hr, size);
2566         if (hr == NULL)
2567                 RETURN(-ENOMEM);
2568         for (i = 0; i < n_cpus; i++) {
2569                 struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
2570
2571                 cfs_spin_lock_init(&t->hrt_lock);
2572                 cfs_waitq_init(&t->hrt_wait);
2573                 CFS_INIT_LIST_HEAD(&t->hrt_queue);
2574                 cfs_init_completion(&t->hrt_completion);
2575         }
2576         hr->hr_n_threads = n_cpus;
2577         hr->hr_size = size;
2578         ptlrpc_hr = hr;
2579
2580         rc = ptlrpc_start_hr_threads(hr);
2581         if (rc) {
2582                 OBD_FREE(hr, hr->hr_size);
2583                 ptlrpc_hr = NULL;
2584         }
2585         RETURN(rc);
2586 }
2587
2588 void ptlrpc_hr_fini(void)
2589 {
2590         if (ptlrpc_hr != NULL) {
2591                 ptlrpc_stop_hr_threads(ptlrpc_hr);
2592                 OBD_FREE(ptlrpc_hr, ptlrpc_hr->hr_size);
2593                 ptlrpc_hr = NULL;
2594         }
2595 }
2596
2597 #endif /* __KERNEL__ */
2598
2599 /**
2600  * Wait until all already scheduled replies are processed.
2601  */
2602 static void ptlrpc_wait_replies(struct ptlrpc_service *svc)
2603 {
2604         while (1) {
2605                 int rc;
2606                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
2607                                                      NULL, NULL);
2608                 rc = l_wait_event(svc->srv_waitq, cfs_atomic_read(&svc-> \
2609                                   srv_n_difficult_replies) == 0,
2610                                   &lwi);
2611                 if (rc == 0)
2612                         break;
2613                 CWARN("Unexpectedly long timeout %p\n", svc);
2614         }
2615 }
2616
2617 int ptlrpc_unregister_service(struct ptlrpc_service *service)
2618 {
2619         int                   rc;
2620         struct l_wait_info    lwi;
2621         cfs_list_t           *tmp;
2622         struct ptlrpc_reply_state *rs, *t;
2623         struct ptlrpc_at_array *array = &service->srv_at_array;
2624         ENTRY;
2625
2626         service->srv_is_stopping = 1;
2627         cfs_timer_disarm(&service->srv_at_timer);
2628
2629         ptlrpc_stop_all_threads(service);
2630         LASSERT(cfs_list_empty(&service->srv_threads));
2631
2632         cfs_spin_lock (&ptlrpc_all_services_lock);
2633         cfs_list_del_init (&service->srv_list);
2634         cfs_spin_unlock (&ptlrpc_all_services_lock);
2635
2636         ptlrpc_lprocfs_unregister_service(service);
2637
2638         /* All history will be culled when the next request buffer is
2639          * freed */
2640         service->srv_max_history_rqbds = 0;
2641
2642         CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
2643
2644         rc = LNetClearLazyPortal(service->srv_req_portal);
2645         LASSERT (rc == 0);
2646
2647         /* Unlink all the request buffers.  This forces a 'final' event with
2648          * its 'unlink' flag set for each posted rqbd */
2649         cfs_list_for_each(tmp, &service->srv_active_rqbds) {
2650                 struct ptlrpc_request_buffer_desc *rqbd =
2651                         cfs_list_entry(tmp, struct ptlrpc_request_buffer_desc,
2652                                        rqbd_list);
2653
2654                 rc = LNetMDUnlink(rqbd->rqbd_md_h);
2655                 LASSERT (rc == 0 || rc == -ENOENT);
2656         }
2657
2658         /* Wait for the network to release any buffers it's currently
2659          * filling */
2660         for (;;) {
2661                 cfs_spin_lock(&service->srv_lock);
2662                 rc = service->srv_nrqbd_receiving;
2663                 cfs_spin_unlock(&service->srv_lock);
2664
2665                 if (rc == 0)
2666                         break;
2667
2668                 /* Network access will complete in finite time but the HUGE
2669                  * timeout lets us CWARN for visibility of sluggish NALs */
2670                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2671                                            cfs_time_seconds(1), NULL, NULL);
2672                 rc = l_wait_event(service->srv_waitq,
2673                                   service->srv_nrqbd_receiving == 0,
2674                                   &lwi);
2675                 if (rc == -ETIMEDOUT)
2676                         CWARN("Service %s waiting for request buffers\n",
2677                               service->srv_name);
2678         }
2679
2680         /* schedule all outstanding replies to terminate them */
2681         cfs_spin_lock(&service->srv_rs_lock);
2682         while (!cfs_list_empty(&service->srv_active_replies)) {
2683                 struct ptlrpc_reply_state *rs =
2684                         cfs_list_entry(service->srv_active_replies.next,
2685                                        struct ptlrpc_reply_state, rs_list);
2686                 cfs_spin_lock(&rs->rs_lock);
2687                 ptlrpc_schedule_difficult_reply(rs);
2688                 cfs_spin_unlock(&rs->rs_lock);
2689         }
2690         cfs_spin_unlock(&service->srv_rs_lock);
2691
2692         /* purge the request queue.  NB No new replies (rqbds all unlinked)
2693          * and no service threads, so I'm the only thread noodling the
2694          * request queue now */
2695         while (!cfs_list_empty(&service->srv_req_in_queue)) {
2696                 struct ptlrpc_request *req =
2697                         cfs_list_entry(service->srv_req_in_queue.next,
2698                                        struct ptlrpc_request,
2699                                        rq_list);
2700
2701                 cfs_list_del(&req->rq_list);
2702                 service->srv_n_queued_reqs--;
2703                 service->srv_n_active_reqs++;
2704                 ptlrpc_server_finish_request(service, req);
2705         }
2706         while (ptlrpc_server_request_pending(service, 1)) {
2707                 struct ptlrpc_request *req;
2708
2709                 req = ptlrpc_server_request_get(service, 1);
2710                 cfs_list_del(&req->rq_list);
2711                 service->srv_n_active_reqs++;
2712                 ptlrpc_server_finish_request(service, req);
2713         }
2714         LASSERT(service->srv_n_queued_reqs == 0);
2715         LASSERT(service->srv_n_active_reqs == 0);
2716         LASSERT(service->srv_n_history_rqbds == 0);
2717         LASSERT(cfs_list_empty(&service->srv_active_rqbds));
2718
2719         /* Now free all the request buffers since nothing references them
2720          * any more... */
2721         while (!cfs_list_empty(&service->srv_idle_rqbds)) {
2722                 struct ptlrpc_request_buffer_desc *rqbd =
2723                         cfs_list_entry(service->srv_idle_rqbds.next,
2724                                        struct ptlrpc_request_buffer_desc,
2725                                        rqbd_list);
2726
2727                 ptlrpc_free_rqbd(rqbd);
2728         }
2729
2730         ptlrpc_wait_replies(service);
2731
2732         cfs_list_for_each_entry_safe(rs, t, &service->srv_free_rs_list,
2733                                      rs_list) {
2734                 cfs_list_del(&rs->rs_list);
2735                 OBD_FREE_LARGE(rs, service->srv_max_reply_size);
2736         }
2737
2738         /* In case somebody rearmed this in the meantime */
2739         cfs_timer_disarm(&service->srv_at_timer);
2740
2741         if (array->paa_reqs_array != NULL) {
2742                 OBD_FREE(array->paa_reqs_array,
2743                          sizeof(cfs_list_t) * array->paa_size);
2744                 array->paa_reqs_array = NULL;
2745         }
2746
2747         if (array->paa_reqs_count != NULL) {
2748                 OBD_FREE(array->paa_reqs_count,
2749                          sizeof(__u32) * array->paa_size);
2750                 array->paa_reqs_count = NULL;
2751         }
2752
2753         OBD_FREE_PTR(service);
2754         RETURN(0);
2755 }
2756
2757 /**
2758  * Returns 0 if the service is healthy.
2759  *
2760  * Right now, it just checks to make sure that requests aren't languishing
2761  * in the queue.  We'll use this health check to govern whether a node needs
2762  * to be shot, so it's intentionally non-aggressive. */
2763 int ptlrpc_service_health_check(struct ptlrpc_service *svc)
2764 {
2765         struct ptlrpc_request *request;
2766         struct timeval         right_now;
2767         long                   timediff;
2768
2769         if (svc == NULL)
2770                 return 0;
2771
2772         cfs_gettimeofday(&right_now);
2773
2774         cfs_spin_lock(&svc->srv_rq_lock);
2775         if (!ptlrpc_server_request_pending(svc, 1)) {
2776                 cfs_spin_unlock(&svc->srv_rq_lock);
2777                 return 0;
2778         }
2779
2780         /* How long has the next entry been waiting? */
2781         if (cfs_list_empty(&svc->srv_request_queue))
2782                 request = cfs_list_entry(svc->srv_request_hpq.next,
2783                                          struct ptlrpc_request, rq_list);
2784         else
2785                 request = cfs_list_entry(svc->srv_request_queue.next,
2786                                          struct ptlrpc_request, rq_list);
2787         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
2788         cfs_spin_unlock(&svc->srv_rq_lock);
2789
2790         if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 :
2791                                         at_max)) {
2792                 CERROR("%s: unhealthy - request has been waiting %lds\n",
2793                        svc->srv_name, timediff / ONE_MILLION);
2794                 return (-1);
2795         }
2796
2797         return 0;
2798 }