lustre/ptlrpc/service.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #define DEBUG_SUBSYSTEM S_RPC
38 #ifndef __KERNEL__
39 #include <liblustre.h>
40 #endif
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_net.h>
44 #include <lu_object.h>
45 #include <lnet/types.h>
46 #include "ptlrpc_internal.h"
47
48 /* The following are visible and mutable through /sys/module/ptlrpc */
49 int test_req_buffer_pressure = 0;
50 CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
51                 "set non-zero to put pressure on request buffer pools");
52 CFS_MODULE_PARM(at_min, "i", int, 0644,
53                 "Adaptive timeout minimum (sec)");
54 CFS_MODULE_PARM(at_max, "i", int, 0644,
55                 "Adaptive timeout maximum (sec)");
56 CFS_MODULE_PARM(at_history, "i", int, 0644,
57                 "Adaptive timeouts remember the slowest event that took place "
58                 "within this period (sec)");
59 CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
60                 "How soon before an RPC deadline to send an early reply");
61 CFS_MODULE_PARM(at_extra, "i", int, 0644,
62                 "How much extra time to give with each early reply");
63
64
65 /* forward ref */
66 static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);
67
68 static CFS_LIST_HEAD(ptlrpc_all_services);
69 spinlock_t ptlrpc_all_services_lock;
70
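/**
 * Allocate a request buffer of \a size bytes, using vmalloc for buffers
 * larger than SVC_BUF_VMALLOC_THRESHOLD and regular allocation otherwise.
 */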
71 static char *
72 ptlrpc_alloc_request_buffer (int size)
73 {
74         char *ptr;
75
76         if (size > SVC_BUF_VMALLOC_THRESHOLD)
77                 OBD_VMALLOC(ptr, size);
78         else
79                 OBD_ALLOC(ptr, size);
80
81         return (ptr);
82 }
83
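/**
 * Free a request buffer allocated by ptlrpc_alloc_request_buffer();
 * \a size selects the matching free path (vfree vs. regular free).
 */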
84 static void
85 ptlrpc_free_request_buffer (char *ptr, int size)
86 {
87         if (size > SVC_BUF_VMALLOC_THRESHOLD)
88                 OBD_VFREE(ptr, size);
89         else
90                 OBD_FREE(ptr, size);
91 }
92
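/**
 * Allocate a request buffer descriptor for \a svc with an attached
 * srv_buf_size receive buffer, and add it to the service's idle list.
 */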
93 struct ptlrpc_request_buffer_desc *
94 ptlrpc_alloc_rqbd (struct ptlrpc_service *svc)
95 {
96         struct ptlrpc_request_buffer_desc *rqbd;
97
98         OBD_ALLOC_PTR(rqbd);
99         if (rqbd == NULL)
100                 return (NULL);
101
102         rqbd->rqbd_service = svc;
103         rqbd->rqbd_refcount = 0;
104         rqbd->rqbd_cbid.cbid_fn = request_in_callback;
105         rqbd->rqbd_cbid.cbid_arg = rqbd;
106         CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
107         rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);
108
109         if (rqbd->rqbd_buffer == NULL) {
110                 OBD_FREE_PTR(rqbd);
111                 return (NULL);
112         }
113
114         spin_lock(&svc->srv_lock);
115         list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
116         svc->srv_nbufs++;
117         spin_unlock(&svc->srv_lock);
118
119         return (rqbd);
120 }
121
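/**
 * Free an unused request buffer descriptor (zero refcount, no queued
 * requests) along with its attached buffer.
 */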
122 void
123 ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
124 {
125         struct ptlrpc_service *svc = rqbd->rqbd_service;
126
127         LASSERT (rqbd->rqbd_refcount == 0);
128         LASSERT (list_empty(&rqbd->rqbd_reqs));
129
130         spin_lock(&svc->srv_lock);
131         list_del(&rqbd->rqbd_list);
132         svc->srv_nbufs--;
133         spin_unlock(&svc->srv_lock);
134
135         ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
136         OBD_FREE_PTR(rqbd);
137 }
138
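/**
 * Grow the request buffer pool of \a svc by one group
 * (srv_nbuf_per_group buffers) and post the new buffers.
 */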
139 int
140 ptlrpc_grow_req_bufs(struct ptlrpc_service *svc)
141 {
142         struct ptlrpc_request_buffer_desc *rqbd;
143         int                                i;
144
145         CDEBUG(D_RPCTRACE, "%s: allocate %d new %d-byte reqbufs (%d/%d left)\n",
146                svc->srv_name, svc->srv_nbuf_per_group, svc->srv_buf_size,
147                svc->srv_nrqbd_receiving, svc->srv_nbufs);
148         for (i = 0; i < svc->srv_nbuf_per_group; i++) {
149                 rqbd = ptlrpc_alloc_rqbd(svc);
150
151                 if (rqbd == NULL) {
152                         CERROR ("%s: Can't allocate request buffer\n",
153                                 svc->srv_name);
154                         return (-ENOMEM);
155                 }
156
157                 if (ptlrpc_server_post_idle_rqbds(svc) < 0)
158                         return (-EAGAIN);
159         }
160
161         return (0);
162 }
163
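/**
 * Save a lock reference in the reply state of \a req so it is only
 * released once the client has ACKed the reply; if the export has
 * already disconnected, just drop the lock reference.
 */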
164 void
165 ptlrpc_save_lock(struct ptlrpc_request *req,
166                  struct lustre_handle *lock, int mode, int no_ack)
167 {
168         struct ptlrpc_reply_state *rs = req->rq_reply_state;
169         int                        idx;
170
171         LASSERT(rs != NULL);
172         LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
173
174         if (req->rq_export->exp_disconnected) {
175                 ldlm_lock_decref(lock, mode);
176         } else {
177                 idx = rs->rs_nlocks++;
178                 rs->rs_locks[idx] = *lock;
179                 rs->rs_modes[idx] = mode;
180                 rs->rs_difficult = 1;
181                 rs->rs_no_ack = !!no_ack;
182         }
183 }
184
185 #ifdef __KERNEL__
186
187 #define HRT_RUNNING 0
188 #define HRT_STOPPING 1
189
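/** State of a single reply handling (HR) thread. */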
190 struct ptlrpc_hr_thread {
191         spinlock_t        hrt_lock;
192         unsigned long     hrt_flags;
193         cfs_waitq_t       hrt_wait;
194         struct list_head  hrt_queue;
195         struct completion hrt_completion;
196 };
197
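/** Per-node reply handling service: an array of HR threads. */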
198 struct ptlrpc_hr_service {
199         int                     hr_index;
200         int                     hr_n_threads;
201         int                     hr_size;
202         struct ptlrpc_hr_thread hr_threads[0];
203 };
204
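/** A batch of replies for one service, dispatched to an HR thread together. */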
205 struct rs_batch {
206         struct list_head        rsb_replies;
207         struct ptlrpc_service  *rsb_svc;
208         unsigned int            rsb_n_replies;
209 };
210
211 /**
212  *  A pointer to per-node reply handling service.
213  */
214 static struct ptlrpc_hr_service *ptlrpc_hr = NULL;
215
216 /**
217  * maximum number of replies scheduled in one batch
218  */
219 #define MAX_SCHEDULED 256
220
221 /**
222  * Initialize a reply batch.
223  *
224  * \param b batch
225  */
226 static void rs_batch_init(struct rs_batch *b)
227 {
228         memset(b, 0, sizeof *b);
229         CFS_INIT_LIST_HEAD(&b->rsb_replies);
230 }
231
232 /**
233  * Dispatch all replies accumulated in the batch to one of the
234  * dedicated reply handling threads.
235  *
236  * \param b batch
237  */
238 static void rs_batch_dispatch(struct rs_batch *b)
239 {
240         if (b->rsb_n_replies != 0) {
241                 struct ptlrpc_hr_service *hr = ptlrpc_hr;
242                 int idx;
243
244                 idx = hr->hr_index++;
245                 if (hr->hr_index >= hr->hr_n_threads)
246                         hr->hr_index = 0;
247
248                 spin_lock(&hr->hr_threads[idx].hrt_lock);
249                 list_splice_init(&b->rsb_replies,
250                                  &hr->hr_threads[idx].hrt_queue);
251                 spin_unlock(&hr->hr_threads[idx].hrt_lock);
252                 cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
253                 b->rsb_n_replies = 0;
254         }
255 }
256
257 /**
258  * Add a reply to a batch.
259  * Add one reply object to a batch; schedule the batched replies when the
260  *
261  * \param b batch
262  * \param rs reply
263  */
264 static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
265 {
266         struct ptlrpc_service *svc = rs->rs_service;
267
268         if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
269                 if (b->rsb_svc != NULL) {
270                         rs_batch_dispatch(b);
271                         spin_unlock(&b->rsb_svc->srv_lock);
272                 }
273                 spin_lock(&svc->srv_lock);
274                 b->rsb_svc = svc;
275         }
276         spin_lock(&rs->rs_lock);
277         rs->rs_scheduled_ever = 1;
278         if (rs->rs_scheduled == 0) {
279                 list_move(&rs->rs_list, &b->rsb_replies);
280                 rs->rs_scheduled = 1;
281                 b->rsb_n_replies++;
282         }
283         spin_unlock(&rs->rs_lock);
284 }
285
286 /**
287  * Reply batch finalization.
288  * Dispatch the remaining replies from the batch
289  * and release the service lock if it is still held.
290  *
291  * \param b batch
292  */
293 static void rs_batch_fini(struct rs_batch *b)
294 {
295         if (b->rsb_svc != NULL) {
296                 rs_batch_dispatch(b);
297                 spin_unlock(&b->rsb_svc->srv_lock);
298         }
299 }
300
301 #define DECLARE_RS_BATCH(b)     struct rs_batch b
302
303 #else /* __KERNEL__ */
304
305 #define rs_batch_init(b)        do{}while(0)
306 #define rs_batch_fini(b)        do{}while(0)
307 #define rs_batch_add(b, r)      ptlrpc_schedule_difficult_reply(r)
308 #define DECLARE_RS_BATCH(b)
309
310 #endif /* __KERNEL__ */
311
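/**
 * Pass a difficult reply to a reply handling thread, chosen round-robin;
 * without __KERNEL__ (liblustre) it is queued on the service reply queue.
 */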
312 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
313 {
314 #ifdef __KERNEL__
315         struct ptlrpc_hr_service *hr = ptlrpc_hr;
316         int idx;
317         ENTRY;
318
319         LASSERT(list_empty(&rs->rs_list));
320
321         idx = hr->hr_index++;
322         if (hr->hr_index >= hr->hr_n_threads)
323                 hr->hr_index = 0;
324         spin_lock(&hr->hr_threads[idx].hrt_lock);
325         list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
326         spin_unlock(&hr->hr_threads[idx].hrt_lock);
327         cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
328         EXIT;
329 #else
330         list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
331 #endif
332 }
333
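/**
 * Mark a difficult reply scheduled and dispatch it, unless it is already
 * scheduled. Caller must hold the service lock and the reply state lock.
 */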
334 void
335 ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
336 {
337         ENTRY;
338
339         LASSERT_SPIN_LOCKED(&rs->rs_service->srv_lock);
340         LASSERT_SPIN_LOCKED(&rs->rs_lock);
341         LASSERT (rs->rs_difficult);
342         rs->rs_scheduled_ever = 1;              /* flag any notification attempt */
343
344         if (rs->rs_scheduled) {                  /* being set up or already notified */
345                 EXIT;
346                 return;
347         }
348
349         rs->rs_scheduled = 1;
350         list_del_init(&rs->rs_list);
351         ptlrpc_dispatch_difficult_reply(rs);
352         EXIT;
353 }
354
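/**
 * Schedule all uncommitted replies of \a exp whose transno has now been
 * committed (VBR: per-export last_committed) for completion.
 */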
355 void ptlrpc_commit_replies(struct obd_export *exp)
356 {
357         struct ptlrpc_reply_state *rs, *nxt;
358         DECLARE_RS_BATCH(batch);
359         ENTRY;
360
361         rs_batch_init(&batch);
362         /* Find any replies that have been committed and schedule them for
363          * completion by their service. */
364
365         /* CAVEAT EMPTOR: spinlock ordering!!! */
366         spin_lock(&exp->exp_uncommitted_replies_lock);
367         list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
368                                  rs_obd_list) {
369                 LASSERT (rs->rs_difficult);
370                 /* VBR: per-export last_committed */
371                 LASSERT(rs->rs_export);
372                 if (rs->rs_transno <= exp->exp_last_committed) {
373                         list_del_init(&rs->rs_obd_list);
374                         rs_batch_add(&batch, rs);
375                 }
376         }
377         spin_unlock(&exp->exp_uncommitted_replies_lock);
378         rs_batch_fini(&batch);
379         EXIT;
380 }
381
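/**
 * Post all idle request buffer descriptors of \a svc for receive.
 * Returns 1 if at least one buffer was posted, 0 if the idle list was
 * empty, and -1 if posting failed (the rqbd goes back to the idle list).
 */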
382 static int
383 ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
384 {
385         struct ptlrpc_request_buffer_desc *rqbd;
386         int                                rc;
387         int                                posted = 0;
388
389         for (;;) {
390                 spin_lock(&svc->srv_lock);
391
392                 if (list_empty (&svc->srv_idle_rqbds)) {
393                         spin_unlock(&svc->srv_lock);
394                         return (posted);
395                 }
396
397                 rqbd = list_entry(svc->srv_idle_rqbds.next,
398                                   struct ptlrpc_request_buffer_desc,
399                                   rqbd_list);
400                 list_del (&rqbd->rqbd_list);
401
402                 /* assume we will post successfully */
403                 svc->srv_nrqbd_receiving++;
404                 list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
405
406                 spin_unlock(&svc->srv_lock);
407
408                 rc = ptlrpc_register_rqbd(rqbd);
409                 if (rc != 0)
410                         break;
411
412                 posted = 1;
413         }
414
415         spin_lock(&svc->srv_lock);
416
417         svc->srv_nrqbd_receiving--;
418         list_del(&rqbd->rqbd_list);
419         list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
420
421         /* Don't complain if no request buffers are posted right now; LNET
422          * won't drop requests because we set the portal lazy! */
423
424         spin_unlock(&svc->srv_lock);
425
426         return (-1);
427 }
428
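/** Initialize a service from a ptlrpc_service_conf; a thin wrapper
 * around ptlrpc_init_svc(). */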
429 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
430                                             svc_handler_t h, char *name,
431                                             struct proc_dir_entry *proc_entry,
432                                             svcreq_printfn_t prntfn,
433                                             char *threadname)
434 {
435         return ptlrpc_init_svc(c->psc_nbufs, c->psc_bufsize,
436                                c->psc_max_req_size, c->psc_max_reply_size,
437                                c->psc_req_portal, c->psc_rep_portal,
438                                c->psc_watchdog_factor,
439                                h, name, proc_entry,
440                                prntfn, c->psc_min_threads, c->psc_max_threads,
441                                threadname, c->psc_ctx_tags, NULL);
442 }
443 EXPORT_SYMBOL(ptlrpc_init_svc_conf);
444
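/* Adaptive timeout timer callback: flag the service for an early reply
 * check and wake a service thread to perform it. */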
445 static void ptlrpc_at_timer(unsigned long castmeharder)
446 {
447         struct ptlrpc_service *svc = (struct ptlrpc_service *)castmeharder;
448         svc->srv_at_check = 1;
449         svc->srv_at_checktime = cfs_time_current();
450         cfs_waitq_signal(&svc->srv_waitq);
451 }
452
453 /* @threadname should be 11 characters or less - 3 will be added on */
454 struct ptlrpc_service *
455 ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
456                 int req_portal, int rep_portal, int watchdog_factor,
457                 svc_handler_t handler, char *name,
458                 cfs_proc_dir_entry_t *proc_entry,
459                 svcreq_printfn_t svcreq_printfn,
460                 int min_threads, int max_threads,
461                 char *threadname, __u32 ctx_tags,
462                 svc_hpreq_handler_t hp_handler)
463 {
464         int                     rc;
465         struct ptlrpc_at_array *array;
466         struct ptlrpc_service  *service;
467         unsigned int            size, index;
468         ENTRY;
469
470         LASSERT (nbufs > 0);
471         LASSERT (bufsize >= max_req_size + SPTLRPC_MAX_PAYLOAD);
472         LASSERT (ctx_tags != 0);
473
474         OBD_ALLOC_PTR(service);
475         if (service == NULL)
476                 RETURN(NULL);
477
478         /* First initialise enough for early teardown */
479
480         service->srv_name = name;
481         spin_lock_init(&service->srv_lock);
482         CFS_INIT_LIST_HEAD(&service->srv_threads);
483         cfs_waitq_init(&service->srv_waitq);
484
485         service->srv_nbuf_per_group = test_req_buffer_pressure ? 1 : nbufs;
486         service->srv_max_req_size = max_req_size + SPTLRPC_MAX_PAYLOAD;
487         service->srv_buf_size = bufsize;
488         service->srv_rep_portal = rep_portal;
489         service->srv_req_portal = req_portal;
490         service->srv_watchdog_factor = watchdog_factor;
491         service->srv_handler = handler;
492         service->srv_request_history_print_fn = svcreq_printfn;
493         service->srv_request_seq = 1;           /* valid seq #s start at 1 */
494         service->srv_request_max_cull_seq = 0;
495         service->srv_threads_min = min_threads;
496         service->srv_threads_max = max_threads;
497         service->srv_thread_name = threadname;
498         service->srv_ctx_tags = ctx_tags;
499         service->srv_hpreq_handler = hp_handler;
500         service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
501         service->srv_hpreq_count = 0;
502         service->srv_n_hpreq = 0;
503
504         rc = LNetSetLazyPortal(service->srv_req_portal);
505         LASSERT (rc == 0);
506
507         CFS_INIT_LIST_HEAD(&service->srv_request_queue);
508         CFS_INIT_LIST_HEAD(&service->srv_request_hpq);
509         CFS_INIT_LIST_HEAD(&service->srv_idle_rqbds);
510         CFS_INIT_LIST_HEAD(&service->srv_active_rqbds);
511         CFS_INIT_LIST_HEAD(&service->srv_history_rqbds);
512         CFS_INIT_LIST_HEAD(&service->srv_request_history);
513         CFS_INIT_LIST_HEAD(&service->srv_active_replies);
514 #ifndef __KERNEL__
515         CFS_INIT_LIST_HEAD(&service->srv_reply_queue);
516 #endif
517         CFS_INIT_LIST_HEAD(&service->srv_free_rs_list);
518         cfs_waitq_init(&service->srv_free_rs_waitq);
519         atomic_set(&service->srv_n_difficult_replies, 0);
520
521         spin_lock_init(&service->srv_at_lock);
522         CFS_INIT_LIST_HEAD(&service->srv_req_in_queue);
523
524         array = &service->srv_at_array;
525         size = at_est2timeout(at_max);
526         array->paa_size = size;
527         array->paa_count = 0;
528         array->paa_deadline = -1;
529
530         /* allocate memory for srv_at_array (ptlrpc_at_array) */
531         OBD_ALLOC(array->paa_reqs_array, sizeof(struct list_head) * size);
532         if (array->paa_reqs_array == NULL)
533                 GOTO(failed, NULL);
534
535         for (index = 0; index < size; index++)
536                 CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
537
538         OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
539         if (array->paa_reqs_count == NULL)
540                 GOTO(failed, NULL);
541
542         cfs_timer_init(&service->srv_at_timer, ptlrpc_at_timer, service);
543         /* At SOW, service time should be quick; 10s seems generous. If client
544            timeout is less than this, we'll be sending an early reply. */
545         at_init(&service->srv_at_estimate, 10, 0);
546
547         spin_lock (&ptlrpc_all_services_lock);
548         list_add (&service->srv_list, &ptlrpc_all_services);
549         spin_unlock (&ptlrpc_all_services_lock);
550
551         /* Now allocate the request buffers */
552         rc = ptlrpc_grow_req_bufs(service);
553         /* We shouldn't be under memory pressure at startup, so
554          * fail if we can't post all our buffers at this time. */
555         if (rc != 0)
556                 GOTO(failed, NULL);
557
558         /* Now allocate pool of reply buffers */
559         /* Increase max reply size to next power of two */
560         service->srv_max_reply_size = 1;
561         while (service->srv_max_reply_size <
562                max_reply_size + SPTLRPC_MAX_PAYLOAD)
563                 service->srv_max_reply_size <<= 1;
564
565         if (proc_entry != NULL)
566                 ptlrpc_lprocfs_register_service(proc_entry, service);
567
568         CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
569                service->srv_name, service->srv_req_portal);
570
571         RETURN(service);
572 failed:
573         ptlrpc_unregister_service(service);
574         return NULL;
575 }
576
577 /**
578  * Actually free the request; must be called without holding svc_lock.
579  * Note it is the caller's responsibility to unlink req->rq_list.
580  */
581 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
582 {
583         LASSERT(atomic_read(&req->rq_refcount) == 0);
584         LASSERT(list_empty(&req->rq_timed_list));
585
586          /* DEBUG_REQ() assumes the reply state of a request with a valid
587           * ref will not be destroyed until that reference is dropped. */
588         ptlrpc_req_drop_rs(req);
589
590         sptlrpc_svc_ctx_decref(req);
591
592         if (req != &req->rq_rqbd->rqbd_req) {
593                 /* NB request buffers use an embedded
594                  * req if the incoming req unlinked the
595                  * MD; this isn't one of them! */
596                 OBD_FREE(req, sizeof(*req));
597         }
598 }
599
600 /**
601  * Drop a reference of the request. If it reaches zero, we either
602  * put it into the history list or free it immediately.
603  */
604 static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
605 {
606         struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
607         struct ptlrpc_service             *svc = rqbd->rqbd_service;
608         int                                refcount;
609         struct list_head                  *tmp;
610         struct list_head                  *nxt;
611
612         if (!atomic_dec_and_test(&req->rq_refcount))
613                 return;
614
615         spin_lock(&svc->srv_lock);
616
617         svc->srv_n_active_reqs--;
618         list_add(&req->rq_list, &rqbd->rqbd_reqs);
619
620         refcount = --(rqbd->rqbd_refcount);
621         if (refcount == 0) {
622                 /* request buffer is now idle: add to history */
623                 list_del(&rqbd->rqbd_list);
624                 list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
625                 svc->srv_n_history_rqbds++;
626
627                 /* cull some history?
628                  * I expect only about 1 or 2 rqbds need to be recycled here */
629                 while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) {
630                         rqbd = list_entry(svc->srv_history_rqbds.next,
631                                           struct ptlrpc_request_buffer_desc,
632                                           rqbd_list);
633
634                         list_del(&rqbd->rqbd_list);
635                         svc->srv_n_history_rqbds--;
636
637                         /* remove rqbd's reqs from svc's req history while
638                          * I've got the service lock */
639                         list_for_each(tmp, &rqbd->rqbd_reqs) {
640                                 req = list_entry(tmp, struct ptlrpc_request,
641                                                  rq_list);
642                                 /* Track the highest culled req seq */
643                                 if (req->rq_history_seq >
644                                     svc->srv_request_max_cull_seq)
645                                         svc->srv_request_max_cull_seq =
646                                                 req->rq_history_seq;
647                                 list_del(&req->rq_history_list);
648                         }
649
650                         spin_unlock(&svc->srv_lock);
651
652                         list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
653                                 req = list_entry(rqbd->rqbd_reqs.next,
654                                                  struct ptlrpc_request,
655                                                  rq_list);
656                                 list_del(&req->rq_list);
657                                 ptlrpc_server_free_request(req);
658                         }
659
660                         spin_lock(&svc->srv_lock);
661                         /*
662                          * now all reqs including the embedded req have been
663                          * disposed of; schedule the request buffer for re-use.
664                          */
665                         LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
666                         list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
667                 }
668
669                 spin_unlock(&svc->srv_lock);
670         } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
671                 /* If we are low on memory, we are not interested in history */
672                 list_del(&req->rq_list);
673                 list_del_init(&req->rq_history_list);
674                 spin_unlock(&svc->srv_lock);
675
676                 ptlrpc_server_free_request(req);
677         } else {
678                 spin_unlock(&svc->srv_lock);
679         }
680 }
681
682 /**
683  * Finish a request: stop sending more early replies, and release
684  * the request. Should be called after we have finished handling it.
685  */
686 static void ptlrpc_server_finish_request(struct ptlrpc_request *req)
687 {
688         struct ptlrpc_service  *svc = req->rq_rqbd->rqbd_service;
689
690         if (req->rq_export) {
691                 class_export_put(req->rq_export);
692                 req->rq_export = NULL;
693         }
694
695         if (req->rq_phase != RQ_PHASE_NEW) /* incorrect message magic */
696                 DEBUG_REQ(D_INFO, req, "free req");
697
698         spin_lock(&svc->srv_at_lock);
699         req->rq_sent_final = 1;
700         list_del_init(&req->rq_timed_list);
701         if (req->rq_at_linked) {
702                 struct ptlrpc_at_array *array = &svc->srv_at_array;
703                 __u32 index = req->rq_at_index;
704
705                 req->rq_at_linked = 0;
706                 array->paa_reqs_count[index]--;
707                 array->paa_count--;
708         }
709         spin_unlock(&svc->srv_at_lock);
710
711         ptlrpc_server_drop_request(req);
712 }
713
714 /* This function makes sure dead exports are evicted in a timely manner.
715    It is only called when some export receives a message (i.e., the
716    network is up.) */
717 static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
718 {
719         struct obd_export *oldest_exp;
720         time_t oldest_time, new_time;
721
722         ENTRY;
723
724         LASSERT(exp);
725
726         /* Compensate for slow machines, etc, by faking our request time
727            into the future.  Although this can break the strict time-ordering
728            of the list, we can be really lazy here - we don't have to evict
729            at the exact right moment.  Eventually, all silent exports
730            will make it to the top of the list. */
731
732         /* Do not pay attention to renewals of one second or less. */
733         new_time = cfs_time_current_sec() + extra_delay;
734         if (exp->exp_last_request_time + 1 /*second */ >= new_time)
735                 RETURN_EXIT;
736
737         exp->exp_last_request_time = new_time;
738         CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
739                exp->exp_client_uuid.uuid,
740                exp->exp_last_request_time, exp);
741
742         /* exports may get disconnected from the chain even though the
743            export has references, so we must keep the spin lock while
744            manipulating the lists */
745         spin_lock(&exp->exp_obd->obd_dev_lock);
746
747         if (list_empty(&exp->exp_obd_chain_timed)) {
748                 /* this one is not timed */
749                 spin_unlock(&exp->exp_obd->obd_dev_lock);
750                 RETURN_EXIT;
751         }
752
753         list_move_tail(&exp->exp_obd_chain_timed,
754                        &exp->exp_obd->obd_exports_timed);
755
756         oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
757                                 struct obd_export, exp_obd_chain_timed);
758         oldest_time = oldest_exp->exp_last_request_time;
759         spin_unlock(&exp->exp_obd->obd_dev_lock);
760
761         if (exp->exp_obd->obd_recovering) {
762                 /* be nice to everyone during recovery */
763                 EXIT;
764                 return;
765         }
766
767         /* Note - racing to start/reset the obd_eviction timer is safe */
768         if (exp->exp_obd->obd_eviction_timer == 0) {
769                 /* Check if the oldest entry is expired. */
770                 if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
771                                               extra_delay)) {
772                         /* We need a second timer, in case the net was down and
773                          * it just came back. Since the pinger may skip every
774                          * other PING_INTERVAL (see note in ptlrpc_pinger_main),
775                          * we better wait for 3. */
776                         exp->exp_obd->obd_eviction_timer =
777                                 cfs_time_current_sec() + 3 * PING_INTERVAL;
778                         CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
779                                exp->exp_obd->obd_name, obd_export_nid2str(exp),
780                                oldest_time);
781                 }
782         } else {
783                 if (cfs_time_current_sec() >
784                     (exp->exp_obd->obd_eviction_timer + extra_delay)) {
785                         /* The evictor won't evict anyone who we've heard from
786                          * recently, so we don't have to check before we start
787                          * it. */
788                         if (!ping_evictor_wake(exp))
789                                 exp->exp_obd->obd_eviction_timer = 0;
790                 }
791         }
792
793         EXIT;
794 }
795
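/* Sanity check an incoming request against its export: drop requests
 * from an old connection and fail requests for an obd device that is
 * failing over. */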
796 static int ptlrpc_check_req(struct ptlrpc_request *req)
797 {
798         if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
799                      req->rq_export->exp_conn_cnt)) {
800                 DEBUG_REQ(D_ERROR, req,
801                           "DROPPING req from old connection %d < %d",
802                           lustre_msg_get_conn_cnt(req->rq_reqmsg),
803                           req->rq_export->exp_conn_cnt);
804                 return -EEXIST;
805         }
806         if (unlikely(req->rq_export->exp_obd &&
807                      req->rq_export->exp_obd->obd_fail)) {
808                 /* Failing over, don't handle any more reqs; send an
809                  * error response instead. */
810                 CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
811                        req, req->rq_export->exp_obd->obd_name);
812                 req->rq_status = -ENODEV;
813                 ptlrpc_error(req);
814                 return -ENODEV;
815         }
816
817         return 0;
818 }
819
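/* Arm the adaptive timeout timer for the earliest deadline in the AT
 * array (minus at_early_margin), or disarm it if the array is empty. */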
820 static void ptlrpc_at_set_timer(struct ptlrpc_service *svc)
821 {
822         struct ptlrpc_at_array *array = &svc->srv_at_array;
823         __s32 next;
824
825         spin_lock(&svc->srv_at_lock);
826         if (array->paa_count == 0) {
827                 cfs_timer_disarm(&svc->srv_at_timer);
828                 spin_unlock(&svc->srv_at_lock);
829                 return;
830         }
831
832         /* Set timer for closest deadline */
833         next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
834                        at_early_margin);
835         if (next <= 0)
836                 ptlrpc_at_timer((unsigned long)svc);
837         else
838                 cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next));
839         spin_unlock(&svc->srv_at_lock);
840         CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next);
841 }
842
843 /* Add rpc to early reply check list */
844 static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
845 {
846         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
847         struct ptlrpc_request *rq = NULL;
848         struct ptlrpc_at_array *array = &svc->srv_at_array;
849         __u32 index;
850         int found = 0;
851
852         if (AT_OFF)
853                 return(0);
854
855         if (req->rq_no_reply)
856                 return 0;
857
858         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
859                 return(-ENOSYS);
860
861         spin_lock(&svc->srv_at_lock);
862
863         if (unlikely(req->rq_sent_final)) {
864                 spin_unlock(&svc->srv_at_lock);
865                 return 0;
866         }
867
868         LASSERT(list_empty(&req->rq_timed_list));
869
870         index = (unsigned long)req->rq_deadline % array->paa_size;
871         if (array->paa_reqs_count[index] > 0) {
872                 /* latest rpcs will have the latest deadlines in the list,
873                  * so search backward. */
874                 list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
875                                             rq_timed_list) {
876                         if (req->rq_deadline >= rq->rq_deadline) {
877                                 list_add(&req->rq_timed_list,
878                                          &rq->rq_timed_list);
879                                 break;
880                         }
881                 }
882         }
883
884         /* Add the request at the head of the list */
885         if (list_empty(&req->rq_timed_list))
886                 list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
887
888         req->rq_at_linked = 1;
889         req->rq_at_index = index;
890         array->paa_reqs_count[index]++;
891         array->paa_count++;
892         if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
893                 array->paa_deadline = req->rq_deadline;
894                 found = 1;
895         }
896         spin_unlock(&svc->srv_at_lock);
897
898         if (found)
899                 ptlrpc_at_set_timer(svc);
900
901         return 0;
902 }
903
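/* Send an early reply for \a req asking the client for extra_time more
 * seconds; on success the server-side deadline is extended to match. */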
904 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req,
905                                       int extra_time)
906 {
907         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
908         struct ptlrpc_request *reqcopy;
909         struct lustre_msg *reqmsg;
910         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
911         time_t newdl;
912         int rc;
913         ENTRY;
914
915         /* deadline is when the client expects us to reply, margin is the
916            difference between clients' and servers' expectations */
917         DEBUG_REQ(D_ADAPTTO, req,
918                   "%ssending early reply (deadline %+lds, margin %+lds) for "
919                   "%d+%d", AT_OFF ? "AT off - not " : "",
920                   olddl, olddl - at_get(&svc->srv_at_estimate),
921                   at_get(&svc->srv_at_estimate), extra_time);
922
923         if (AT_OFF)
924                 RETURN(0);
925
926         if (olddl < 0) {
927                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
928                           "not sending early reply. Consider increasing "
929                           "at_early_margin (%d)?", olddl, at_early_margin);
930
931                 /* Return an error so we're not re-added to the timed list. */
932                 RETURN(-ETIMEDOUT);
933         }
934
935         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
936                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
937                           "but no AT support");
938                 RETURN(-ENOSYS);
939         }
940
941         if (req->rq_export && req->rq_export->exp_in_recovery) {
942                 /* don't increase server estimates during recovery, and give
943                    clients the full recovery time. */
944                 newdl = cfs_time_current_sec() +
945                         req->rq_export->exp_obd->obd_recovery_timeout;
946         } else {
947                 if (extra_time) {
948                         /* Fake our processing time into the future to ask the
949                            clients for some extra amount of time */
950                         extra_time += cfs_time_current_sec() -
951                                 req->rq_arrival_time.tv_sec;
952                         at_add(&svc->srv_at_estimate, extra_time);
953                 }
954                 newdl = req->rq_arrival_time.tv_sec +
955                         at_get(&svc->srv_at_estimate);
956         }
957         if (req->rq_deadline >= newdl) {
958                 /* We're not adding any time, no need to send an early reply
959                    (e.g. maybe at adaptive_max) */
960                 DEBUG_REQ(D_WARNING, req, "Couldn't add any time ("
961                           CFS_DURATION_T"/"CFS_DURATION_T"), "
962                           "not sending early reply\n", olddl,
963                           cfs_time_sub(newdl, cfs_time_current_sec()));
964                 RETURN(-ETIMEDOUT);
965         }
966
967         OBD_ALLOC(reqcopy, sizeof *reqcopy);
968         if (reqcopy == NULL)
969                 RETURN(-ENOMEM);
970         OBD_ALLOC(reqmsg, req->rq_reqlen);
971         if (!reqmsg) {
972                 OBD_FREE(reqcopy, sizeof *reqcopy);
973                 RETURN(-ENOMEM);
974         }
975
976         *reqcopy = *req;
977         reqcopy->rq_reply_state = NULL;
978         reqcopy->rq_rep_swab_mask = 0;
979         reqcopy->rq_pack_bulk = 0;
980         reqcopy->rq_pack_udesc = 0;
981         reqcopy->rq_packed_final = 0;
982         sptlrpc_svc_ctx_addref(reqcopy);
983         /* We only need the reqmsg for the magic */
984         reqcopy->rq_reqmsg = reqmsg;
985         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
986
987         if (req->rq_sent_final) {
988                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
989                           "abort sending early reply\n");
990                 GOTO(out, rc = 0);
991         }
992
993         /* Connection ref */
994         reqcopy->rq_export = class_conn2export(
995                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
996         if (reqcopy->rq_export == NULL)
997                 GOTO(out, rc = -ENODEV);
998
999         /* RPC ref */
1000         class_export_rpc_get(reqcopy->rq_export);
1001         if (reqcopy->rq_export->exp_obd &&
1002             reqcopy->rq_export->exp_obd->obd_fail)
1003                 GOTO(out_put, rc = -ENODEV);
1004
1005         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1006         if (rc)
1007                 GOTO(out_put, rc);
1008
1009         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1010
1011         if (!rc) {
1012                 /* Adjust our own deadline to what we told the client */
1013                 req->rq_deadline = newdl;
1014                 req->rq_early_count++; /* number sent, server side */
1015         } else {
1016                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1017         }
1018
1019         /* Free the (early) reply state from lustre_pack_reply.
1020            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1021         ptlrpc_req_drop_rs(reqcopy);
1022
1023 out_put:
1024         class_export_rpc_put(reqcopy->rq_export);
1025         class_export_put(reqcopy->rq_export);
1026 out:
1027         sptlrpc_svc_ctx_decref(reqcopy);
1028         OBD_FREE(reqmsg, req->rq_reqlen);
1029         OBD_FREE(reqcopy, sizeof *reqcopy);
1030         RETURN(rc);
1031 }
1032
1033 /* Send early replies to everybody expiring within at_early_margin
1034    asking for at_extra time */
1035 static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
1036 {
1037         struct ptlrpc_request *rq, *n;
1038         struct list_head work_list;
1039         struct ptlrpc_at_array *array = &svc->srv_at_array;
1040         __u32  index, count;
1041         time_t deadline;
1042         time_t now = cfs_time_current_sec();
1043         cfs_duration_t delay;
1044         int first, counter = 0;
1045         ENTRY;
1046
1047         spin_lock(&svc->srv_at_lock);
1048         if (svc->srv_at_check == 0) {
1049                 spin_unlock(&svc->srv_at_lock);
1050                 RETURN(0);
1051         }
1052         delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime);
1053         svc->srv_at_check = 0;
1054
1055         if (array->paa_count == 0) {
1056                 spin_unlock(&svc->srv_at_lock);
1057                 RETURN(0);
1058         }
1059
1060         /* The timer went off, but maybe the nearest rpc already completed. */
1061         first = array->paa_deadline - now;
1062         if (first > at_early_margin) {
1063                 /* We've still got plenty of time.  Reset the timer. */
1064                 spin_unlock(&svc->srv_at_lock);
1065                 ptlrpc_at_set_timer(svc);
1066                 RETURN(0);
1067         }
1068
1069         /* We're close to a timeout, and we don't know how much longer the
1070            server will take. Send early replies to everyone expiring soon. */
1071         CFS_INIT_LIST_HEAD(&work_list);
1072         deadline = -1;
1073         index = (unsigned long)array->paa_deadline % array->paa_size;
1074         count = array->paa_count;
1075         while (count > 0) {
1076                 count -= array->paa_reqs_count[index];
1077                 list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
1078                                          rq_timed_list) {
1079                         if (rq->rq_deadline <= now + at_early_margin) {
1080                                 list_move(&rq->rq_timed_list, &work_list);
1081                                 counter++;
1082                                 array->paa_reqs_count[index]--;
1083                                 array->paa_count--;
1084                                 rq->rq_at_linked = 0;
1085                                 continue;
1086                         }
1087
1088                         /* update the earliest deadline */
1089                         if (deadline == -1 || rq->rq_deadline < deadline)
1090                                 deadline = rq->rq_deadline;
1091
1092                         break;
1093                 }
1094
1095                 if (++index >= array->paa_size)
1096                         index = 0;
1097         }
1098         array->paa_deadline = deadline;
1099         spin_unlock(&svc->srv_at_lock);
1100
1101         /* we have a new earliest deadline, restart the timer */
1102         ptlrpc_at_set_timer(svc);
1103
1104         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1105                "replies\n", first, at_extra, counter);
1106         if (first < 0) {
1107                 /* We're already past request deadlines before we even get a
1108                    chance to send early replies */
1109                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1110                               "request traffic (cpu-bound).\n", svc->srv_name);
1111                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1112                       "delay="CFS_DURATION_T"(jiff)\n",
1113                       counter, svc->srv_n_queued_reqs, svc->srv_n_active_reqs,
1114                       at_get(&svc->srv_at_estimate), delay);
1115         }
1116
1117         /* ptlrpc_server_finish_request may delete an entry out of
1118          * the work list */
1119         spin_lock(&svc->srv_at_lock);
1120         while (!list_empty(&work_list)) {
1121                 rq = list_entry(work_list.next, struct ptlrpc_request,
1122                                 rq_timed_list);
1123                 list_del_init(&rq->rq_timed_list);
1124                 /* if the entry is still in the worklist, it hasn't been
1125                    deleted, and is safe to take a ref to keep the req around */
1126                 atomic_inc(&rq->rq_refcount);
1127                 spin_unlock(&svc->srv_at_lock);
1128
1129                 if (ptlrpc_at_send_early_reply(rq, at_extra) == 0)
1130                         ptlrpc_at_add_timed(rq);
1131
1132                 ptlrpc_server_drop_request(rq);
1133                 spin_lock(&svc->srv_at_lock);
1134         }
1135         spin_unlock(&svc->srv_at_lock);
1136
1137         RETURN(0);
1138 }
1139
1140 /**
1141  * Put the request on the export list if it may become
1142  * a high priority one.
1143  */
1144 static int ptlrpc_hpreq_init(struct ptlrpc_service *svc,
1145                              struct ptlrpc_request *req)
1146 {
1147         int rc;
1148         ENTRY;
1149
1150         if (svc->srv_hpreq_handler) {
1151                 rc = svc->srv_hpreq_handler(req);
1152                 if (rc)
1153                         RETURN(rc);
1154         }
1155         if (req->rq_export && req->rq_ops) {
1156                 spin_lock(&req->rq_export->exp_lock);
1157                 list_add(&req->rq_exp_list, &req->rq_export->exp_queued_rpc);
1158                 spin_unlock(&req->rq_export->exp_lock);
1159         }
1160
1161         RETURN(0);
1162 }
1163
1164 /** Remove the request from the export list. */
1165 static void ptlrpc_hpreq_fini(struct ptlrpc_request *req)
1166 {
1167         ENTRY;
1168         if (req->rq_export && req->rq_ops) {
1169                 spin_lock(&req->rq_export->exp_lock);
1170                 list_del_init(&req->rq_exp_list);
1171                 spin_unlock(&req->rq_export->exp_lock);
1172         }
1173         EXIT;
1174 }
1175
1176 /**
1177  * Make the request a high priority one.
1178  *
1179  * All the high priority requests are queued in a separate FIFO
1180  * ptlrpc_service::srv_request_hpq list which is parallel to
1181  * ptlrpc_service::srv_request_queue list but has a higher priority
1182  * for handling.
1183  *
1184  * \see ptlrpc_server_handle_request().
1185  */
1186 static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service *svc,
1187                                         struct ptlrpc_request *req)
1188 {
1189         ENTRY;
1190         LASSERT(svc != NULL);
1191         spin_lock(&req->rq_lock);
1192         if (req->rq_hp == 0) {
1193                 int opc = lustre_msg_get_opc(req->rq_reqmsg);
1194
1195                 /* Add to the high priority queue. */
1196                 list_move_tail(&req->rq_list, &svc->srv_request_hpq);
1197                 req->rq_hp = 1;
1198                 if (opc != OBD_PING)
1199                         DEBUG_REQ(D_NET, req, "high priority req");
1200         }
1201         spin_unlock(&req->rq_lock);
1202         EXIT;
1203 }
1204
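/** Move \a req to the high priority queue if it has not yet been taken
 * for processing. */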
1205 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
1206 {
1207         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1208         ENTRY;
1209
1210         spin_lock(&svc->srv_lock);
1211         /* It may happen that the request is already taken for processing
1212          * but still in the export list; do not re-add it to the HP list. */
1213         if (req->rq_phase == RQ_PHASE_NEW)
1214                 ptlrpc_hpreq_reorder_nolock(svc, req);
1215         spin_unlock(&svc->srv_lock);
1216         EXIT;
1217 }
1218
1219 /** Check if the request is a high priority one. */
1220 static int ptlrpc_server_hpreq_check(struct ptlrpc_request *req)
1221 {
1222         int opc, rc = 0;
1223         ENTRY;
1224
1225         /* Check by request opc. */
1226         opc = lustre_msg_get_opc(req->rq_reqmsg);
1227         if (opc == OBD_PING)
1228                 RETURN(1);
1229
1230         /* Perform request specific check. */
1231         if (req->rq_ops && req->rq_ops->hpreq_check)
1232                 rc = req->rq_ops->hpreq_check(req);
1233         RETURN(rc);
1234 }
1235
1236 /** Queue a request, placing high priority ones on the HP queue. */
1237 static int ptlrpc_server_request_add(struct ptlrpc_service *svc,
1238                                      struct ptlrpc_request *req)
1239 {
1240         int rc;
1241         ENTRY;
1242
1243         rc = ptlrpc_server_hpreq_check(req);
1244         if (rc < 0)
1245                 RETURN(rc);
1246
1247         spin_lock(&svc->srv_lock);
1248         /* Before inserting the request into the queue, check if it is not
1249          * inserted yet, or even already handled -- it may happen due to
1250          * a racing ldlm_server_blocking_ast(). */
1251         if (req->rq_phase == RQ_PHASE_NEW && list_empty(&req->rq_list)) {
1252                 if (rc)
1253                         ptlrpc_hpreq_reorder_nolock(svc, req);
1254                 else
1255                         list_add_tail(&req->rq_list, &svc->srv_request_queue);
1256         }
1257         spin_unlock(&svc->srv_lock);
1258
1259         RETURN(0);
1260 }
1261
1262 /* Only allow normal priority requests on a service that has a high-priority
1263  * queue if forced (i.e. cleanup), if there are other high priority requests
1264  * already being processed (i.e. those threads can service more high-priority
1265  * requests), or if there are enough idle threads that a later thread can do
1266  * a high priority request. */
1267 static int ptlrpc_server_allow_normal(struct ptlrpc_service *svc, int force)
1268 {
1269         return force || !svc->srv_hpreq_handler || svc->srv_n_hpreq > 0 ||
1270                svc->srv_n_active_reqs < svc->srv_threads_running - 2;
1271 }
1272
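/* Pick the next request to handle: serve the normal queue when allowed
 * and once srv_hpreq_ratio high priority requests have been taken in a
 * row, otherwise serve the high priority queue. */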
1273 static struct ptlrpc_request *
1274 ptlrpc_server_request_get(struct ptlrpc_service *svc, int force)
1275 {
1276         struct ptlrpc_request *req = NULL;
1277         ENTRY;
1278
1279         if (ptlrpc_server_allow_normal(svc, force) &&
1280             !list_empty(&svc->srv_request_queue) &&
1281             (list_empty(&svc->srv_request_hpq) ||
1282              svc->srv_hpreq_count >= svc->srv_hpreq_ratio)) {
1283                 req = list_entry(svc->srv_request_queue.next,
1284                                  struct ptlrpc_request, rq_list);
1285                 svc->srv_hpreq_count = 0;
1286         } else if (!list_empty(&svc->srv_request_hpq)) {
1287                 req = list_entry(svc->srv_request_hpq.next,
1288                                  struct ptlrpc_request, rq_list);
1289                 svc->srv_hpreq_count++;
1290         }
1291         RETURN(req);
1292 }
1293
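/* Check whether the service has any request ready to handle: a queued
 * normal request (when normal handling is allowed) or any queued high
 * priority request. */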
1294 static int ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force)
1295 {
1296         return ((ptlrpc_server_allow_normal(svc, force) &&
1297                  !list_empty(&svc->srv_request_queue)) ||
1298                 !list_empty(&svc->srv_request_hpq));
1299 }
1300
1301 /* Handle freshly incoming reqs, add to timed early reply list,
1302    pass on to regular request queue */
1303 static int
1304 ptlrpc_server_handle_req_in(struct ptlrpc_service *svc)
1305 {
1306         struct ptlrpc_request *req;
1307         __u32                  deadline;
1308         int                    rc;
1309         ENTRY;
1310
1311         LASSERT(svc);
1312
1313         spin_lock(&svc->srv_lock);
1314         if (list_empty(&svc->srv_req_in_queue)) {
1315                 spin_unlock(&svc->srv_lock);
1316                 RETURN(0);
1317         }
1318
1319         req = list_entry(svc->srv_req_in_queue.next,
1320                          struct ptlrpc_request, rq_list);
1321         list_del_init (&req->rq_list);
1322         /* Consider this still a "queued" request as far as stats are
1323            concerned */
1324         spin_unlock(&svc->srv_lock);
1325
1326         /* go through security check/transform */
1327         rc = sptlrpc_svc_unwrap_request(req);
1328         switch (rc) {
1329         case SECSVC_OK:
1330                 break;
1331         case SECSVC_COMPLETE:
1332                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1333                 goto err_req;
1334         case SECSVC_DROP:
1335                 goto err_req;
1336         default:
1337                 LBUG();
1338         }
1339
1340         /* Clear request swab mask; this is a new request */
1341         req->rq_req_swab_mask = 0;
1342
1343         rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen);
1344         if (rc != 0) {
1345                 CERROR("error unpacking request: ptl %d from %s x"LPU64"\n",
1346                        svc->srv_req_portal, libcfs_id2str(req->rq_peer),
1347                        req->rq_xid);
1348                 goto err_req;
1349         }
1350
1351         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1352         if (rc) {
1353                 CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
1354                         LPU64"\n", svc->srv_req_portal,
1355                         libcfs_id2str(req->rq_peer), req->rq_xid);
1356                 goto err_req;
1357         }
1358
1359         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1360             lustre_msg_get_opc(req->rq_reqmsg) == obd_fail_val) {
1361                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1362                        obd_fail_val, req->rq_xid);
1363                 goto err_req;
1364         }
1365
1366         rc = -EINVAL;
1367         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1368                 CERROR("wrong packet type received (type=%u) from %s\n",
1369                        lustre_msg_get_type(req->rq_reqmsg),
1370                        libcfs_id2str(req->rq_peer));
1371                 goto err_req;
1372         }
1373
1374         switch(lustre_msg_get_opc(req->rq_reqmsg)) {
1375         case MDS_WRITEPAGE:
1376         case OST_WRITE:
1377                 req->rq_bulk_write = 1;
1378                 break;
1379         case MDS_READPAGE:
1380         case OST_READ:
1381                 req->rq_bulk_read = 1;
1382                 break;
1383         }
1384
1385         CDEBUG(D_NET, "got req "LPU64"\n", req->rq_xid);
1386
1387         req->rq_export = class_conn2export(
1388                 lustre_msg_get_handle(req->rq_reqmsg));
1389         if (req->rq_export) {
1390                 rc = ptlrpc_check_req(req);
1391                 if (rc == 0) {
1392                         rc = sptlrpc_target_export_check(req->rq_export, req);
1393                         if (rc)
1394                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1395                                           "illegal security flavor,");
1396                 }
1397
1398                 if (rc)
1399                         goto err_req;
1400                 ptlrpc_update_export_timer(req->rq_export, 0);
1401         }
1402
1403         /* req_in handling should/must be fast */
1404         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1405                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1406                           cfs_time_sub(cfs_time_current_sec(),
1407                                        req->rq_arrival_time.tv_sec));
1408
1409         /* Set rpc server deadline and add it to the timed list */
1410         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1411                     MSGHDR_AT_SUPPORT) ?
1412                    /* The max time the client expects us to take */
1413                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1414         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1415         if (unlikely(deadline == 0)) {
1416                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1417                 goto err_req;
1418         }
1419
1420         ptlrpc_at_add_timed(req);
1421         rc = ptlrpc_hpreq_init(svc, req);
1422         if (rc)
1423                 GOTO(err_req, rc);
1424
1425         /* Move it over to the request processing queue */
1426         rc = ptlrpc_server_request_add(svc, req);
1427         if (rc)
1428                 GOTO(err_req, rc);
1429         cfs_waitq_signal(&svc->srv_waitq);
1430         RETURN(1);
1431
1432 err_req:
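        /* The request was counted as queued; ptlrpc_server_finish_request()
         * expects to drop an *active* request, so move the accounting from
         * queued to active before finishing it off */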
1433         spin_lock(&svc->srv_lock);
1434         svc->srv_n_queued_reqs--;
1435         svc->srv_n_active_reqs++;
1436         spin_unlock(&svc->srv_lock);
1437         ptlrpc_server_finish_request(req);
1438
1439         RETURN(1);
1440 }
1441
1442 static int
1443 ptlrpc_server_handle_request(struct ptlrpc_service *svc,
1444                              struct ptlrpc_thread *thread)
1445 {
1446         struct obd_export     *export = NULL;
1447         struct ptlrpc_request *request;
1448         struct timeval         work_start;
1449         struct timeval         work_end;
1450         long                   timediff;
1451         int                    opc, rc;
1452         int                    fail_opc = 0;
1453         ENTRY;
1454
1455         LASSERT(svc);
1456
1457         spin_lock(&svc->srv_lock);
1458         if (unlikely(!ptlrpc_server_request_pending(svc, 0) ||
1459             (
1460 #ifndef __KERNEL__
1461              /* !@%$# liblustre only has 1 thread */
1462              atomic_read(&svc->srv_n_difficult_replies) != 0 &&
1463 #endif
1464              svc->srv_n_active_reqs >= (svc->srv_threads_running - 1)))) {
1465                 /* Don't handle regular requests in the last thread, in order
1466                  * to handle difficult replies (which might block other threads)
1467                  * as well as handle any incoming reqs, early replies, etc.
1468                  * That means we always need at least 2 service threads. */
1469                 spin_unlock(&svc->srv_lock);
1470                 RETURN(0);
1471         }
1472
1473         request = ptlrpc_server_request_get(svc, 0);
1474         if (request == NULL) {
1475                 spin_unlock(&svc->srv_lock);
1476                 RETURN(0);
1477         }
1478
1479         opc = lustre_msg_get_opc(request->rq_reqmsg);
1480         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
1481                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
1482         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
1483                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
1484
1485         if (unlikely(fail_opc)) {
1486                 if (request->rq_export && request->rq_ops) {
1487                         spin_unlock(&svc->srv_lock);
1488                         OBD_FAIL_TIMEOUT(fail_opc, 4);
1489                         spin_lock(&svc->srv_lock);
1490                         request = ptlrpc_server_request_get(svc, 0);
1491                         if (request == NULL) {
1492                                 spin_unlock(&svc->srv_lock);
1493                                 RETURN(0);
1494                         }
1495                         LASSERT(ptlrpc_server_request_pending(svc, 0));
1496                 }
1497         }
1498
1499         list_del_init(&request->rq_list);
1500         svc->srv_n_queued_reqs--;
1501         svc->srv_n_active_reqs++;
1502         if (request->rq_hp)
1503                 svc->srv_n_hpreq++;
1504
1505         /* The phase is changed under the lock here because we need to know
1506          * the request is under processing (see ptlrpc_hpreq_reorder()). */
1507         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
1508         spin_unlock(&svc->srv_lock);
1509
1510         ptlrpc_hpreq_fini(request);
1511
1512         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
1513                 libcfs_debug_dumplog();
1514
1515         do_gettimeofday(&work_start);
1516         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
1517         if (likely(svc->srv_stats != NULL)) {
1518                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
1519                                     timediff);
1520                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
1521                                     svc->srv_n_queued_reqs);
1522                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
1523                                     svc->srv_n_active_reqs);
1524                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
1525                                     at_get(&svc->srv_at_estimate));
1526         }
1527
1528         rc = lu_context_init(&request->rq_session,
1529                              LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
1530         if (rc) {
1531                 CERROR("Failure to initialize session: %d\n", rc);
1532                 goto out_req;
1533         }
1534         request->rq_session.lc_thread = thread;
1535         request->rq_session.lc_cookie = 0x5;
1536         lu_context_enter(&request->rq_session);
1537
1538         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
1539
1540         request->rq_svc_thread = thread;
1541         if (thread)
1542                 request->rq_svc_thread->t_env->le_ses = &request->rq_session;
1543
1544         if (likely(request->rq_export)) {
1545                 if (unlikely(ptlrpc_check_req(request)))
1546                         goto put_conn;
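                /* timediff is in microseconds; >> 19 (~divide by 524288)
                 * converts it to roughly half-second units as a coarse
                 * extra-delay hint for the export timer update */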
1547                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
1548                 export = class_export_rpc_get(request->rq_export);
1549         }
1550
1551         /* Discard requests queued for longer than the deadline.
1552            The deadline is increased if we send an early reply. */
1553         if (cfs_time_current_sec() > request->rq_deadline) {
1554                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
1555                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago",
1556                           libcfs_id2str(request->rq_peer),
1557                           cfs_time_sub(request->rq_deadline,
1558                           request->rq_arrival_time.tv_sec),
1559                           cfs_time_sub(cfs_time_current_sec(),
1560                           request->rq_deadline));
1561                 goto put_rpc_export;
1562         }
1563
1564         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
1565                "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1566                (request->rq_export ?
1567                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1568                (request->rq_export ?
1569                 atomic_read(&request->rq_export->exp_refcount) : -99),
1570                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
1571                libcfs_id2str(request->rq_peer),
1572                lustre_msg_get_opc(request->rq_reqmsg));
1573
1574         OBD_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, obd_fail_val);
1575
1576         rc = svc->srv_handler(request);
1577
1578         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
1579
1580         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
1581                "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1582                (request->rq_export ?
1583                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1584                (request->rq_export ?
1585                 atomic_read(&request->rq_export->exp_refcount) : -99),
1586                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
1587                libcfs_id2str(request->rq_peer),
1588                lustre_msg_get_opc(request->rq_reqmsg));
1589
1590 put_rpc_export:
1591         if (export != NULL)
1592                 class_export_rpc_put(export);
1593 put_conn:
1594         lu_context_exit(&request->rq_session);
1595         lu_context_fini(&request->rq_session);
1596
1597         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
1598                 DEBUG_REQ(D_WARNING, request, "Request x"LPU64" took longer "
1599                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
1600                           " client may timeout.",
1601                           request->rq_xid, cfs_time_sub(request->rq_deadline,
1602                           request->rq_arrival_time.tv_sec),
1603                           cfs_time_sub(cfs_time_current_sec(),
1604                           request->rq_deadline));
1605         }
1606
1607         do_gettimeofday(&work_end);
1608         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1609         CDEBUG(D_RPCTRACE, "request x"LPU64" opc %u from %s processed in "
1610                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
1611                request->rq_xid, lustre_msg_get_opc(request->rq_reqmsg),
1612                libcfs_id2str(request->rq_peer), timediff,
1613                cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
1614                request->rq_repmsg ? lustre_msg_get_transno(request->rq_repmsg) :
1615                request->rq_transno, request->rq_status,
1616                request->rq_repmsg ? lustre_msg_get_status(request->rq_repmsg):
1617                -999);
1618         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
1619                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
1620                 int off = opcode_offset(op); /* don't shadow opc above */
1621                 if (off > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
1622                         LASSERT(off < LUSTRE_MAX_OPCODES);
1623                         lprocfs_counter_add(svc->srv_stats,
1624                                             off + EXTRA_MAX_OPCODES,
1625                                             timediff);
1626                 }
1627         }
1628         if (unlikely(request->rq_early_count)) {
1629                 DEBUG_REQ(D_ADAPTTO, request,
1630                           "sent %d early replies before finishing in "
1631                           CFS_DURATION_T"s",
1632                           request->rq_early_count,
1633                           cfs_time_sub(work_end.tv_sec,
1634                           request->rq_arrival_time.tv_sec));
1635         }
1636
1637 out_req:
1638         spin_lock(&svc->srv_lock);
1639         if (request->rq_hp)
1640                 svc->srv_n_hpreq--;
1641         spin_unlock(&svc->srv_lock);
1642         ptlrpc_server_finish_request(request);
1643
1644         RETURN(1);
1645 }
1646
1647 /**
1648  * An internal function to process a single reply state object.
1649  */
1650 static int
1651 ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
1652 {
1653         struct ptlrpc_service     *svc = rs->rs_service;
1654         struct obd_export         *exp;
1655         struct obd_device         *obd;
1656         int                        nlocks;
1657         int                        been_handled;
1658         ENTRY;
1659
1660         exp = rs->rs_export;
1661         obd = exp->exp_obd;
1662
1663         LASSERT (rs->rs_difficult);
1664         LASSERT (rs->rs_scheduled);
1665         LASSERT (list_empty(&rs->rs_list));
1666
1667         spin_lock (&exp->exp_lock);
1668         /* Noop if removed already */
1669         list_del_init (&rs->rs_exp_list);
1670         spin_unlock (&exp->exp_lock);
1671
1672         /* Avoid exp_uncommitted_replies_lock contention if we are 100% sure
1673          * that rs has already been removed from the list */
1674         if (!list_empty_careful(&rs->rs_obd_list)) {
1675                 spin_lock(&exp->exp_uncommitted_replies_lock);
1676                 list_del_init(&rs->rs_obd_list);
1677                 spin_unlock(&exp->exp_uncommitted_replies_lock);
1678         }
1679
1680         spin_lock(&rs->rs_lock);
1681
1682         been_handled = rs->rs_handled;
1683         rs->rs_handled = 1;
1684
1685         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
1686         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
1687
1688         if (nlocks == 0 && !been_handled) {
1689                 /* If we see this, we should already have seen the warning
1690                  * in mds_steal_ack_locks()  */
1691                 CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
1692                       " o%d NID %s\n",
1693                       rs,
1694                       rs->rs_xid, rs->rs_transno, rs->rs_opc,
1695                       libcfs_nid2str(exp->exp_connection->c_peer.nid));
1696         }
1697
1698         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
1699                 spin_unlock(&rs->rs_lock);
1700
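                /* Done outside rs_lock: rs_nlocks was zeroed above, so the
                 * stolen rs_locks[] entries are ours alone to decref, and the
                 * MD unlink races with network completion anyway */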
1701                 if (!been_handled && rs->rs_on_net) {
1702                         LNetMDUnlink(rs->rs_md_h);
1703                         /* Ignore return code; we're racing with
1704                          * completion... */
1705                 }
1706
1707                 while (nlocks-- > 0)
1708                         ldlm_lock_decref(&rs->rs_locks[nlocks],
1709                                          rs->rs_modes[nlocks]);
1710
1711                 spin_lock(&rs->rs_lock);
1712         }
1713
1714         rs->rs_scheduled = 0;
1715
1716         if (!rs->rs_on_net) {
1717                 /* Off the net */
1718                 spin_unlock(&rs->rs_lock);
1719
1720                 class_export_put (exp);
1721                 rs->rs_export = NULL;
1722                 ptlrpc_rs_decref (rs);
1723                 atomic_dec (&svc->srv_outstanding_replies);
1724                 if (atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
1725                     svc->srv_is_stopping)
1726                         cfs_waitq_broadcast(&svc->srv_waitq);
1727                 RETURN(1);
1728         }
1729
1730         /* still on the net; callback will schedule */
1731         spin_unlock(&rs->rs_lock);
1732         RETURN(1);
1733 }
1734
1735 #ifndef __KERNEL__
1736
1737 /**
1738  * Check whether given service has a reply available for processing
1739  * and process it.
1740  *
1741  * \param svc a ptlrpc service
1742  * \retval 0 no replies processed
1743  * \retval 1 one reply processed
1744  */
1745 static int
1746 ptlrpc_server_handle_reply(struct ptlrpc_service *svc)
1747 {
1748         struct ptlrpc_reply_state *rs = NULL;
1749         ENTRY;
1750
1751         spin_lock(&svc->srv_lock);
1752         if (!list_empty(&svc->srv_reply_queue)) {
1753                 rs = list_entry(svc->srv_reply_queue.prev,
1754                                 struct ptlrpc_reply_state,
1755                                 rs_list);
1756                 list_del_init(&rs->rs_list);
1757         }
1758         spin_unlock(&svc->srv_lock);
1759         if (rs != NULL)
1760                 ptlrpc_handle_rs(rs);
1761         RETURN(rs != NULL);
1762 }
1763
1764 /* FIXME make use of timeout later */
1765 int
1766 liblustre_check_services (void *arg)
1767 {
1768         int  did_something = 0;
1769         int  rc;
1770         struct list_head *tmp, *nxt;
1771         ENTRY;
1772
1773         /* I'm relying on being single threaded, not to have to lock
1774          * ptlrpc_all_services etc */
1775         list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
1776                 struct ptlrpc_service *svc =
1777                         list_entry (tmp, struct ptlrpc_service, srv_list);
1778
1779                 if (svc->srv_threads_running != 0)     /* I've recursed */
1780                         continue;
1781
1782                 /* service threads can block for bulk, so this limits us
1783                  * (arbitrarily) to recursing 1 stack frame per service.
1784                  * Note that the problem with recursion is that we have to
1785                  * unwind completely before our caller can resume. */
1786
1787                 svc->srv_threads_running++;
1788
1789                 do {
1790                         rc = ptlrpc_server_handle_req_in(svc);
1791                         rc |= ptlrpc_server_handle_reply(svc);
1792                         rc |= ptlrpc_at_check_timed(svc);
1793                         rc |= ptlrpc_server_handle_request(svc, NULL);
1794                         rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
1795                         did_something |= rc;
1796                 } while (rc);
1797
1798                 svc->srv_threads_running--;
1799         }
1800
1801         RETURN(did_something);
1802 }
1803 #define ptlrpc_stop_all_threads(s) do {} while (0)
1804
1805 #else /* __KERNEL__ */
1806
1807 static void
1808 ptlrpc_check_rqbd_pool(struct ptlrpc_service *svc)
1809 {
1810         int avail = svc->srv_nrqbd_receiving;
1811         int low_water = test_req_buffer_pressure ? 0 :
1812                         svc->srv_nbuf_per_group/2;
1813
1814         /* NB I'm not locking; just looking. */
1815
1816         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
1817          * allowed the request history to grow out of control.  We could put a
1818          * sanity check on that here and cull some history if we need the
1819          * space. */
1820
1821         if (avail <= low_water)
1822                 ptlrpc_grow_req_bufs(svc);
1823
1824         if (svc->srv_stats)
1825                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQBUF_AVAIL_CNTR,
1826                                     avail);
1827 }
1828
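/* l_wait_event() timeout callback: clear the back-off so the main loop
 * retries posting idle request buffers immediately */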
1829 static int
1830 ptlrpc_retry_rqbds(void *arg)
1831 {
1832         struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;
1833
1834         svc->srv_rqbd_timeout = 0;
1835         return (-ETIMEDOUT);
1836 }
1837
1838 static int ptlrpc_main(void *arg)
1839 {
1840         struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
1841         struct ptlrpc_service  *svc = data->svc;
1842         struct ptlrpc_thread   *thread = data->thread;
1843         struct obd_device      *dev = data->dev;
1844         struct ptlrpc_reply_state *rs;
1845 #ifdef WITH_GROUP_INFO
1846         struct group_info *ginfo = NULL;
1847 #endif
1848         struct lu_env env;
1849         int counter = 0, rc = 0;
1850         ENTRY;
1851
1852         cfs_daemonize_ctxt(data->name);
1853
1854 #if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA)
1855         /* we need to do this before any per-thread allocation is done so that
1856          * we get the per-thread allocations on local node.  bug 7342 */
1857         if (svc->srv_cpu_affinity) {
1858                 int cpu, num_cpu;
1859
1860                 for (cpu = 0, num_cpu = 0; cpu < num_possible_cpus(); cpu++) {
1861                         if (!cpu_online(cpu))
1862                                 continue;
1863                         if (num_cpu == thread->t_id % num_online_cpus())
1864                                 break;
1865                         num_cpu++;
1866                 }
1867                 set_cpus_allowed(cfs_current(), node_to_cpumask(cpu_to_node(cpu)));
1868         }
1869 #endif
1870
1871 #ifdef WITH_GROUP_INFO
1872         ginfo = groups_alloc(0);
1873         if (!ginfo) {
1874                 rc = -ENOMEM;
1875                 goto out;
1876         }
1877
1878         set_current_groups(ginfo);
1879         put_group_info(ginfo);
1880 #endif
1881
1882         if (svc->srv_init != NULL) {
1883                 rc = svc->srv_init(thread);
1884                 if (rc)
1885                         goto out;
1886         }
1887
1888         rc = lu_context_init(&env.le_ctx,
1889                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
1890         if (rc)
1891                 goto out_srv_fini;
1892
1893         thread->t_env = &env;
1894         env.le_ctx.lc_thread = thread;
1895         env.le_ctx.lc_cookie = 0x6;
1896
1897         /* Alloc reply state structure for this one */
1898         OBD_ALLOC_GFP(rs, svc->srv_max_reply_size, CFS_ALLOC_STD);
1899         if (!rs) {
1900                 rc = -ENOMEM;
1901                 goto out_srv_fini;
1902         }
1903
1904         /* Record that the thread is running */
1905         thread->t_flags = SVC_RUNNING;
1906         /*
1907          * wake up our creator. Note: @data is invalid after this point,
1908          * because it's allocated on ptlrpc_start_thread() stack.
1909          */
1910         cfs_waitq_signal(&thread->t_ctl_waitq);
1911
1912         thread->t_watchdog = lc_watchdog_add(GET_TIMEOUT(svc), NULL, NULL);
1913
1914         spin_lock(&svc->srv_lock);
1915         svc->srv_threads_running++;
1916         list_add(&rs->rs_list, &svc->srv_free_rs_list);
1917         spin_unlock(&svc->srv_lock);
1918         cfs_waitq_signal(&svc->srv_free_rs_waitq);
1919
1920         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
1921                svc->srv_threads_running);
1922
1923         /* XXX maintain a list of all managed devices: insert here */
1924
1925         while ((thread->t_flags & SVC_STOPPING) == 0) {
1926                 /* Don't exit while there are replies to be handled */
1927                 struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
1928                                                      ptlrpc_retry_rqbds, svc);
1929
1930                 lc_watchdog_disable(thread->t_watchdog);
1931
1932                 cond_resched();
1933
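                /* Sleep until there is work: a stop request, idle rqbds to
                 * repost (with no back-off pending), newly received requests,
                 * queued requests with a spare thread to run them, or an AT
                 * early-reply check falling due */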
1934                 l_wait_event_exclusive (svc->srv_waitq,
1935                               ((thread->t_flags & SVC_STOPPING) != 0) ||
1936                               (!list_empty(&svc->srv_idle_rqbds) &&
1937                                svc->srv_rqbd_timeout == 0) ||
1938                               !list_empty(&svc->srv_req_in_queue) ||
1939                               (ptlrpc_server_request_pending(svc, 0) &&
1940                                (svc->srv_n_active_reqs <
1941                                 (svc->srv_threads_running - 1))) ||
1942                               svc->srv_at_check,
1943                               &lwi);
1944
1945                 lc_watchdog_touch(thread->t_watchdog, GET_TIMEOUT(svc));
1946
1947                 ptlrpc_check_rqbd_pool(svc);
1948
1949                 if ((svc->srv_threads_started < svc->srv_threads_max) &&
1950                     (svc->srv_n_active_reqs >= (svc->srv_threads_started - 1))) {
1951                         /* Ignore return code - we tried... */
1952                         ptlrpc_start_thread(dev, svc);
1953                 }
1954
1955                 if (!list_empty(&svc->srv_req_in_queue)) {
1956                         /* Process all incoming reqs before handling any */
1957                         ptlrpc_server_handle_req_in(svc);
1958                         /* but limit ourselves in case of flood */
1959                         if (counter++ < 1000)
1960                                 continue;
1961                         counter = 0;
1962                 }
1963
1964                 if (svc->srv_at_check)
1965                         ptlrpc_at_check_timed(svc);
1966
1967                 /* don't handle requests in the last thread */
1968                 if (ptlrpc_server_request_pending(svc, 0) &&
1969                     (svc->srv_n_active_reqs < (svc->srv_threads_running - 1))) {
1970                         lu_context_enter(&env.le_ctx);
1971                         ptlrpc_server_handle_request(svc, thread);
1972                         lu_context_exit(&env.le_ctx);
1973                 }
1974
1975                 if (!list_empty(&svc->srv_idle_rqbds) &&
1976                     ptlrpc_server_post_idle_rqbds(svc) < 0) {
1977                         /* I just failed to repost request buffers.  Wait
1978                          * for a timeout (unless something else happens)
1979                          * before I try again */
1980                         svc->srv_rqbd_timeout = cfs_time_seconds(1)/10;
1981                         CDEBUG(D_RPCTRACE,"Posted buffers: %d\n",
1982                                svc->srv_nrqbd_receiving);
1983                 }
1984         }
1985
1986         lc_watchdog_delete(thread->t_watchdog);
1987         thread->t_watchdog = NULL;
1988
1989 out_srv_fini:
1990         /*
1991          * deconstruct service specific state created by ptlrpc_start_thread()
1992          */
1993         if (svc->srv_done != NULL)
1994                 svc->srv_done(thread);
1995
1996         lu_context_fini(&env.le_ctx);
1997 out:
1998         CDEBUG(D_NET, "service thread %d exiting: rc %d\n", thread->t_id, rc);
1999
2000         spin_lock(&svc->srv_lock);
2001         svc->srv_threads_running--; /* must know immediately */
2002         thread->t_id = rc;
2003         thread->t_flags = SVC_STOPPED;
2004
2005         cfs_waitq_signal(&thread->t_ctl_waitq);
2006         spin_unlock(&svc->srv_lock);
2007
2008         return rc;
2009 }
2010
2011 struct ptlrpc_hr_args {
2012         int                       thread_index;
2013         int                       cpu_index;
2014         struct ptlrpc_hr_service *hrs;
2015 };
2016
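/* Splice any queued replies onto @replies under hrt_lock and report whether
 * the handler thread should wake up (stop requested or work available) */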
2017 static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
2018                           struct list_head *replies)
2019 {
2020         int result;
2021
2022         spin_lock(&t->hrt_lock);
2023         list_splice_init(&t->hrt_queue, replies);
2024         result = test_bit(HRT_STOPPING, &t->hrt_flags) ||
2025                 !list_empty(replies);
2026         spin_unlock(&t->hrt_lock);
2027         return result;
2028 }
2029
2030 static int ptlrpc_hr_main(void *arg)
2031 {
2032         struct ptlrpc_hr_args *hr_args = arg;
2033         struct ptlrpc_hr_service *hr = hr_args->hrs;
2034         struct ptlrpc_hr_thread *t = &hr->hr_threads[hr_args->thread_index];
2035         char threadname[20];
2036         CFS_LIST_HEAD(replies);
2037
2038         snprintf(threadname, sizeof(threadname),
2039                  "ptlrpc_hr_%d", hr_args->thread_index);
2040
2041         cfs_daemonize_ctxt(threadname);
2042 #if defined(HAVE_NODE_TO_CPUMASK)
2043         set_cpus_allowed(cfs_current(),
2044                          node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
2045 #endif
2046         set_bit(HRT_RUNNING, &t->hrt_flags);
2047         cfs_waitq_signal(&t->hrt_wait);
2048
2049         while (!test_bit(HRT_STOPPING, &t->hrt_flags)) {
2050
2051                 cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
2052                 while (!list_empty(&replies)) {
2053                         struct ptlrpc_reply_state *rs;
2054
2055                         rs = list_entry(replies.prev,
2056                                         struct ptlrpc_reply_state,
2057                                         rs_list);
2058                         list_del_init(&rs->rs_list);
2059                         ptlrpc_handle_rs(rs);
2060                 }
2061         }
2062
2063         clear_bit(HRT_RUNNING, &t->hrt_flags);
2064         complete(&t->hrt_completion);
2065
2066         return 0;
2067 }
2068
2069 static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu)
2070 {
2071         struct ptlrpc_hr_thread *t = &hr->hr_threads[n];
2072         struct ptlrpc_hr_args args;
2073         int rc;
2074         ENTRY;
2075
2076         args.thread_index = n;
2077         args.cpu_index = cpu;
2078         args.hrs = hr;
2079
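        /* @args lives on this stack frame; ptlrpc_hr_main() only reads it
         * before setting HRT_RUNNING, which we wait for below */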
2080         rc = cfs_kernel_thread(ptlrpc_hr_main, (void*)&args,
2081                                CLONE_VM|CLONE_FILES);
2082         if (rc < 0) {
2083                 complete(&t->hrt_completion);
2084                 GOTO(out, rc);
2085         }
2086         cfs_wait_event(t->hrt_wait, test_bit(HRT_RUNNING, &t->hrt_flags));
2087         RETURN(0);
2088  out:
2089         return rc;
2090 }
2091
2092 static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t)
2093 {
2094         ENTRY;
2095
2096         set_bit(HRT_STOPPING, &t->hrt_flags);
2097         cfs_waitq_signal(&t->hrt_wait);
2098         wait_for_completion(&t->hrt_completion);
2099
2100         EXIT;
2101 }
2102
2103 static void ptlrpc_stop_hr_threads(struct ptlrpc_hr_service *hrs)
2104 {
2105         int n;
2106         ENTRY;
2107
2108         for (n = 0; n < hrs->hr_n_threads; n++)
2109                 ptlrpc_stop_hr_thread(&hrs->hr_threads[n]);
2110
2111         EXIT;
2112 }
2113
2114 static int ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr)
2115 {
2116         int rc = -ENOMEM;
2117         int n, cpu, threads_started = 0;
2118         ENTRY;
2119
2120         LASSERT(hr != NULL);
2121         LASSERT(hr->hr_n_threads > 0);
2122
2123         for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
2124 #if defined(HAVE_NODE_TO_CPUMASK)
2125                 while (!cpu_online(cpu)) {
2126                         cpu++;
2127                         if (cpu >= num_possible_cpus())
2128                                 cpu = 0;
2129                 }
2130 #endif
2131                 rc = ptlrpc_start_hr_thread(hr, n, cpu);
2132                 if (rc != 0)
2133                         break;
2134                 threads_started++;
2135                 cpu++;
2136         }
2137         if (threads_started == 0) {
2138                 CERROR("No reply handling threads started\n");
2139                 RETURN(-ESRCH);
2140         }
2141         if (threads_started < hr->hr_n_threads) {
2142                 CWARN("Started only %d reply handling threads out of %d\n",
2143                       threads_started, hr->hr_n_threads);
2144                 hr->hr_n_threads = threads_started;
2145         }
2146         RETURN(0);
2147 }
2148
2149 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
2150                                struct ptlrpc_thread *thread)
2151 {
2152         struct l_wait_info lwi = { 0 };
2153         ENTRY;
2154
2155         CDEBUG(D_RPCTRACE, "Stopping thread %p\n", thread);
2156         spin_lock(&svc->srv_lock);
2157         thread->t_flags = SVC_STOPPING;
2158         spin_unlock(&svc->srv_lock);
2159
2160         cfs_waitq_broadcast(&svc->srv_waitq);
2161         l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
2162                      &lwi);
2163
2164         spin_lock(&svc->srv_lock);
2165         list_del(&thread->t_link);
2166         spin_unlock(&svc->srv_lock);
2167
2168         OBD_FREE_PTR(thread);
2169         EXIT;
2170 }
2171
2172 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2173 {
2174         struct ptlrpc_thread *thread;
2175         ENTRY;
2176
2177         spin_lock(&svc->srv_lock);
2178         while (!list_empty(&svc->srv_threads)) {
2179                 thread = list_entry(svc->srv_threads.next,
2180                                     struct ptlrpc_thread, t_link);
2181
2182                 spin_unlock(&svc->srv_lock);
2183                 ptlrpc_stop_thread(svc, thread);
2184                 spin_lock(&svc->srv_lock);
2185         }
2186
2187         spin_unlock(&svc->srv_lock);
2188         EXIT;
2189 }
2190
2191 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc)
2192 {
2193         int i, rc = 0;
2194         ENTRY;
2195
2196         /* We require 2 threads min - see note in
2197            ptlrpc_server_handle_request */
2198         LASSERT(svc->srv_threads_min >= 2);
2199         for (i = 0; i < svc->srv_threads_min; i++) {
2200                 rc = ptlrpc_start_thread(dev, svc);
2201                 /* We have enough threads, don't start more.  b=15759 */
2202                 if (rc == -EMFILE)
2203                         break;
2204                 if (rc) {
2205                         CERROR("cannot start %s thread #%d: rc %d\n",
2206                                svc->srv_thread_name, i, rc);
2207                         ptlrpc_stop_all_threads(svc);
2208                 }
2209         }
2210         RETURN(rc);
2211 }
2212
2213 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc)
2214 {
2215         struct l_wait_info lwi = { 0 };
2216         struct ptlrpc_svc_data d;
2217         struct ptlrpc_thread *thread;
2218         char name[32];
2219         int id, rc;
2220         ENTRY;
2221
2222         CDEBUG(D_RPCTRACE, "%s started %d min %d max %d running %d\n",
2223                svc->srv_name, svc->srv_threads_started, svc->srv_threads_min,
2224                svc->srv_threads_max, svc->srv_threads_running);
2225         if (unlikely(svc->srv_threads_started >= svc->srv_threads_max) ||
2226             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2227              svc->srv_threads_started == svc->srv_threads_min - 1))
2228                 RETURN(-EMFILE);
2229
2230         OBD_ALLOC_PTR(thread);
2231         if (thread == NULL)
2232                 RETURN(-ENOMEM);
2233         cfs_waitq_init(&thread->t_ctl_waitq);
2234
2235         spin_lock(&svc->srv_lock);
2236         if (svc->srv_threads_started >= svc->srv_threads_max) {
2237                 spin_unlock(&svc->srv_lock);
2238                 OBD_FREE_PTR(thread);
2239                 RETURN(-EMFILE);
2240         }
2241         list_add(&thread->t_link, &svc->srv_threads);
2242         id = svc->srv_threads_started++;
2243         spin_unlock(&svc->srv_lock);
2244
2245         thread->t_svc = svc;
2246         thread->t_id = id;
2247         sprintf(name, "%s_%02d", svc->srv_thread_name, id);
2248         d.dev = dev;
2249         d.svc = svc;
2250         d.name = name;
2251         d.thread = thread;
2252
2253         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", name);
2254
2255         /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
2256          * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
2257          */
2258         rc = cfs_kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
2259         if (rc < 0) {
2260                 CERROR("cannot start thread '%s': rc %d\n", name, rc);
2261
2262                 spin_lock(&svc->srv_lock);
2263                 list_del(&thread->t_link);
2264                 --svc->srv_threads_started;
2265                 spin_unlock(&svc->srv_lock);
2266
2267                 OBD_FREE(thread, sizeof(*thread));
2268                 RETURN(rc);
2269         }
2270         l_wait_event(thread->t_ctl_waitq,
2271                      thread->t_flags & (SVC_RUNNING | SVC_STOPPED), &lwi);
2272
2273         rc = (thread->t_flags & SVC_STOPPED) ? thread->t_id : 0;
2274         RETURN(rc);
2275 }
2276
2277
2278 int ptlrpc_hr_init(void)
2279 {
2280         int i;
2281         int n_cpus = num_online_cpus();
2282         struct ptlrpc_hr_service *hr;
2283         int size;
2284         int rc;
2285         ENTRY;
2286
2287         LASSERT(ptlrpc_hr == NULL);
2288
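        /* hr_threads[] is a variable-length array at the end of the struct,
         * so size the allocation with offsetof() for one slot per CPU */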
2289         size = offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]);
2290         OBD_ALLOC(hr, size);
2291         if (hr == NULL)
2292                 RETURN(-ENOMEM);
2293         for (i = 0; i < n_cpus; i++) {
2294                 struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
2295
2296                 spin_lock_init(&t->hrt_lock);
2297                 cfs_waitq_init(&t->hrt_wait);
2298                 CFS_INIT_LIST_HEAD(&t->hrt_queue);
2299                 init_completion(&t->hrt_completion);
2300         }
2301         hr->hr_n_threads = n_cpus;
2302         hr->hr_size = size;
2303         ptlrpc_hr = hr;
2304
2305         rc = ptlrpc_start_hr_threads(hr);
2306         if (rc) {
2307                 OBD_FREE(hr, hr->hr_size);
2308                 ptlrpc_hr = NULL;
2309         }
2310         RETURN(rc);
2311 }
2312
2313 void ptlrpc_hr_fini(void)
2314 {
2315         if (ptlrpc_hr != NULL) {
2316                 ptlrpc_stop_hr_threads(ptlrpc_hr);
2317                 OBD_FREE(ptlrpc_hr, ptlrpc_hr->hr_size);
2318                 ptlrpc_hr = NULL;
2319         }
2320 }
2321
2322 #endif /* __KERNEL__ */
2323
2324 /**
2325  * Wait until all already scheduled replies are processed.
2326  */
2327 static void ptlrpc_wait_replies(struct ptlrpc_service *svc)
2328 {
2329         while (1) {
2330                 int rc;
2331                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
2332                                                      NULL, NULL);
2333                 rc = l_wait_event(svc->srv_waitq,
2334                                   atomic_read(&svc->srv_n_difficult_replies) == 0,
2335                                   &lwi);
2336                 if (rc == 0)
2337                         break;
2338                 CWARN("Unexpectedly long wait for replies on service %s\n",
2339                       svc->srv_name);
2339         }
2340 }
2341
2342 int ptlrpc_unregister_service(struct ptlrpc_service *service)
2343 {
2344         int                   rc;
2345         struct l_wait_info    lwi;
2346         struct list_head     *tmp;
2347         struct ptlrpc_reply_state *rs, *t;
2348         struct ptlrpc_at_array *array = &service->srv_at_array;
2349         ENTRY;
2350
2351         service->srv_is_stopping = 1;
2352         cfs_timer_disarm(&service->srv_at_timer);
2353
2354         ptlrpc_stop_all_threads(service);
2355         LASSERT(list_empty(&service->srv_threads));
2356
2357         spin_lock (&ptlrpc_all_services_lock);
2358         list_del_init (&service->srv_list);
2359         spin_unlock (&ptlrpc_all_services_lock);
2360
2361         ptlrpc_lprocfs_unregister_service(service);
2362
2363         /* All history will be culled when the next request buffer is
2364          * freed */
2365         service->srv_max_history_rqbds = 0;
2366
2367         CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
2368
2369         rc = LNetClearLazyPortal(service->srv_req_portal);
2370         LASSERT (rc == 0);
2371
2372         /* Unlink all the request buffers.  This forces a 'final' event with
2373          * its 'unlink' flag set for each posted rqbd */
2374         list_for_each(tmp, &service->srv_active_rqbds) {
2375                 struct ptlrpc_request_buffer_desc *rqbd =
2376                         list_entry(tmp, struct ptlrpc_request_buffer_desc,
2377                                    rqbd_list);
2378
2379                 rc = LNetMDUnlink(rqbd->rqbd_md_h);
2380                 LASSERT (rc == 0 || rc == -ENOENT);
2381         }
2382
2383         /* Wait for the network to release any buffers it's currently
2384          * filling */
2385         for (;;) {
2386                 spin_lock(&service->srv_lock);
2387                 rc = service->srv_nrqbd_receiving;
2388                 spin_unlock(&service->srv_lock);
2389
2390                 if (rc == 0)
2391                         break;
2392
2393                 /* Network access will complete in finite time but the HUGE
2394                  * timeout lets us CWARN for visibility of sluggish NALs */
2395                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2396                                            cfs_time_seconds(1), NULL, NULL);
2397                 rc = l_wait_event(service->srv_waitq,
2398                                   service->srv_nrqbd_receiving == 0,
2399                                   &lwi);
2400                 if (rc == -ETIMEDOUT)
2401                         CWARN("Service %s waiting for request buffers\n",
2402                               service->srv_name);
2403         }
2404
2405         /* schedule all outstanding replies to terminate them */
2406         spin_lock(&service->srv_lock);
2407         while (!list_empty(&service->srv_active_replies)) {
2408                 struct ptlrpc_reply_state *rs =
2409                         list_entry(service->srv_active_replies.next,
2410                                    struct ptlrpc_reply_state, rs_list);
2411                 spin_lock(&rs->rs_lock);
2412                 ptlrpc_schedule_difficult_reply(rs);
2413                 spin_unlock(&rs->rs_lock);
2414         }
2415         spin_unlock(&service->srv_lock);
2416
2417         /* purge the request queue.  NB No new replies (rqbds all unlinked)
2418          * and no service threads, so I'm the only thread noodling the
2419          * request queue now */
2420         while (!list_empty(&service->srv_req_in_queue)) {
2421                 struct ptlrpc_request *req =
2422                         list_entry(service->srv_req_in_queue.next,
2423                                    struct ptlrpc_request,
2424                                    rq_list);
2425
2426                 list_del(&req->rq_list);
2427                 service->srv_n_queued_reqs--;
2428                 service->srv_n_active_reqs++;
2429                 ptlrpc_server_finish_request(req);
2430         }
2431         while (ptlrpc_server_request_pending(service, 1)) {
2432                 struct ptlrpc_request *req;
2433
2434                 req = ptlrpc_server_request_get(service, 1);
2435                 list_del(&req->rq_list);
2436                 service->srv_n_queued_reqs--;
2437                 service->srv_n_active_reqs++;
2438                 ptlrpc_hpreq_fini(req);
2439                 ptlrpc_server_finish_request(req);
2440         }
2441         LASSERT(service->srv_n_queued_reqs == 0);
2442         LASSERT(service->srv_n_active_reqs == 0);
2443         LASSERT(service->srv_n_history_rqbds == 0);
2444         LASSERT(list_empty(&service->srv_active_rqbds));
2445
2446         /* Now free all the request buffers since nothing references them
2447          * any more... */
2448         while (!list_empty(&service->srv_idle_rqbds)) {
2449                 struct ptlrpc_request_buffer_desc *rqbd =
2450                         list_entry(service->srv_idle_rqbds.next,
2451                                    struct ptlrpc_request_buffer_desc,
2452                                    rqbd_list);
2453
2454                 ptlrpc_free_rqbd(rqbd);
2455         }
2456
2457         ptlrpc_wait_replies(service);
2458
2459         list_for_each_entry_safe(rs, t, &service->srv_free_rs_list, rs_list) {
2460                 list_del(&rs->rs_list);
2461                 OBD_FREE(rs, service->srv_max_reply_size);
2462         }
2463
2464         /* In case somebody rearmed this in the meantime */
2465         cfs_timer_disarm(&service->srv_at_timer);
2466
2467         if (array->paa_reqs_array != NULL) {
2468                 OBD_FREE(array->paa_reqs_array,
2469                          sizeof(struct list_head) * array->paa_size);
2470                 array->paa_reqs_array = NULL;
2471         }
2472
2473         if (array->paa_reqs_count != NULL) {
2474                 OBD_FREE(array->paa_reqs_count,
2475                          sizeof(__u32) * array->paa_size);
2476                 array->paa_reqs_count = NULL;
2477         }
2478
2479         OBD_FREE_PTR(service);
2480         RETURN(0);
2481 }
2482
2483 /* Returns 0 if the service is healthy.
2484  *
2485  * Right now, it just checks to make sure that requests aren't languishing
2486  * in the queue.  We'll use this health check to govern whether a node needs
2487  * to be shot, so it's intentionally non-aggressive. */
2488 int ptlrpc_service_health_check(struct ptlrpc_service *svc)
2489 {
2490         struct ptlrpc_request *request;
2491         struct timeval         right_now;
2492         long                   timediff;
2493
2494         if (svc == NULL)
2495                 return 0;
2496
2497         do_gettimeofday(&right_now);
2498
2499         spin_lock(&svc->srv_lock);
2500         if (!ptlrpc_server_request_pending(svc, 1)) {
2501                 spin_unlock(&svc->srv_lock);
2502                 return 0;
2503         }
2504
2505         /* How long has the next entry been waiting? */
2506         if (list_empty(&svc->srv_request_queue))
2507                 request = list_entry(svc->srv_request_hpq.next,
2508                                      struct ptlrpc_request, rq_list);
2509         else
2510                 request = list_entry(svc->srv_request_queue.next,
2511                                      struct ptlrpc_request, rq_list);
2512         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
2513         spin_unlock(&svc->srv_lock);
2514
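        /* Unhealthy if the oldest queued request has waited longer than the
         * worst case we ever promise a client: 1.5 * obd_timeout with static
         * timeouts, or at_max with adaptive timeouts enabled */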
2515         if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 :
2516                                         at_max)) {
2517                 CERROR("%s: unhealthy - request has been waiting %lds\n",
2518                        svc->srv_name, timediff / ONE_MILLION);
2519                 return (-1);
2520         }
2521
2522         return 0;
2523 }
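
/* A minimal sketch (not part of this file upstream) of how a watchdog might
 * poll every registered service with ptlrpc_service_health_check().  The
 * helper name is hypothetical; it assumes taking ptlrpc_all_services_lock
 * around the walk is acceptable, since the per-service check only takes
 * svc->srv_lock briefly. */
#if 0
static int ptlrpc_all_services_healthy(void)
{
        struct ptlrpc_service *svc;
        int rc = 0;

        spin_lock(&ptlrpc_all_services_lock);
        list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
                rc = ptlrpc_service_health_check(svc);
                if (rc != 0)
                        break;  /* report the first unhealthy service */
        }
        spin_unlock(&ptlrpc_all_services_lock);
        return rc;
}
#endif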