lustre/ptlrpc/service.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #define DEBUG_SUBSYSTEM S_RPC
38 #ifndef __KERNEL__
39 #include <liblustre.h>
40 #endif
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_net.h>
44 #include <lu_object.h>
45 #include <lnet/types.h>
46 #include "ptlrpc_internal.h"
47
48 /* The following are visible and mutable through /sys/module/ptlrpc */
49 int test_req_buffer_pressure = 0;
50 CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
51                 "set non-zero to put pressure on request buffer pools");
52 CFS_MODULE_PARM(at_min, "i", int, 0644,
53                 "Adaptive timeout minimum (sec)");
54 CFS_MODULE_PARM(at_max, "i", int, 0644,
55                 "Adaptive timeout maximum (sec)");
56 CFS_MODULE_PARM(at_history, "i", int, 0644,
57                 "Adaptive timeouts remember the slowest event that took place "
58                 "within this period (sec)");
59 CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
60                 "How soon before an RPC deadline to send an early reply");
61 CFS_MODULE_PARM(at_extra, "i", int, 0644,
62                 "How much extra time to give with each early reply");
63
64
65 /* forward ref */
66 static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);
67
68 static CFS_LIST_HEAD(ptlrpc_all_services);
69 cfs_spinlock_t ptlrpc_all_services_lock;
70
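/* Allocate a request buffer, using vmalloc for buffers larger than
 * SVC_BUF_VMALLOC_THRESHOLD and a regular allocation otherwise. */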
71 static char *
72 ptlrpc_alloc_request_buffer (int size)
73 {
74         char *ptr;
75
76         if (size > SVC_BUF_VMALLOC_THRESHOLD)
77                 OBD_VMALLOC(ptr, size);
78         else
79                 OBD_ALLOC(ptr, size);
80
81         return (ptr);
82 }
83
84 static void
85 ptlrpc_free_request_buffer (char *ptr, int size)
86 {
87         if (size > SVC_BUF_VMALLOC_THRESHOLD)
88                 OBD_VFREE(ptr, size);
89         else
90                 OBD_FREE(ptr, size);
91 }
92
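/* Allocate a request buffer descriptor along with its backing buffer and
 * add it to the service's idle rqbd list; returns NULL on allocation failure. */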
93 struct ptlrpc_request_buffer_desc *
94 ptlrpc_alloc_rqbd (struct ptlrpc_service *svc)
95 {
96         struct ptlrpc_request_buffer_desc *rqbd;
97
98         OBD_ALLOC_PTR(rqbd);
99         if (rqbd == NULL)
100                 return (NULL);
101
102         rqbd->rqbd_service = svc;
103         rqbd->rqbd_refcount = 0;
104         rqbd->rqbd_cbid.cbid_fn = request_in_callback;
105         rqbd->rqbd_cbid.cbid_arg = rqbd;
106         CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
107         rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);
108
109         if (rqbd->rqbd_buffer == NULL) {
110                 OBD_FREE_PTR(rqbd);
111                 return (NULL);
112         }
113
114         cfs_spin_lock(&svc->srv_lock);
115         cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
116         svc->srv_nbufs++;
117         cfs_spin_unlock(&svc->srv_lock);
118
119         return (rqbd);
120 }
121
122 void
123 ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
124 {
125         struct ptlrpc_service *svc = rqbd->rqbd_service;
126
127         LASSERT (rqbd->rqbd_refcount == 0);
128         LASSERT (cfs_list_empty(&rqbd->rqbd_reqs));
129
130         cfs_spin_lock(&svc->srv_lock);
131         cfs_list_del(&rqbd->rqbd_list);
132         svc->srv_nbufs--;
133         cfs_spin_unlock(&svc->srv_lock);
134
135         ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
136         OBD_FREE_PTR(rqbd);
137 }
138
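/* Allocate one group (srv_nbuf_per_group) of new request buffers and post
 * them for incoming requests. Returns 0 on success, -ENOMEM if a buffer
 * cannot be allocated, or -EAGAIN if posting fails. */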
139 int
140 ptlrpc_grow_req_bufs(struct ptlrpc_service *svc)
141 {
142         struct ptlrpc_request_buffer_desc *rqbd;
143         int                                i;
144
145         CDEBUG(D_RPCTRACE, "%s: allocate %d new %d-byte reqbufs (%d/%d left)\n",
146                svc->srv_name, svc->srv_nbuf_per_group, svc->srv_buf_size,
147                svc->srv_nrqbd_receiving, svc->srv_nbufs);
148         for (i = 0; i < svc->srv_nbuf_per_group; i++) {
149                 rqbd = ptlrpc_alloc_rqbd(svc);
150
151                 if (rqbd == NULL) {
152                         CERROR ("%s: Can't allocate request buffer\n",
153                                 svc->srv_name);
154                         return (-ENOMEM);
155                 }
156
157                 if (ptlrpc_server_post_idle_rqbds(svc) < 0)
158                         return (-EAGAIN);
159         }
160
161         return (0);
162 }
163
164 /**
165  * Part of Rep-Ack logic.
166  * Puts a lock and its mode into the reply state associated with the request.
167  */
168 void
169 ptlrpc_save_lock(struct ptlrpc_request *req,
170                  struct lustre_handle *lock, int mode, int no_ack)
171 {
172         struct ptlrpc_reply_state *rs = req->rq_reply_state;
173         int                        idx;
174
175         LASSERT(rs != NULL);
176         LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
177
178         if (req->rq_export->exp_disconnected) {
179                 ldlm_lock_decref(lock, mode);
180         } else {
181                 idx = rs->rs_nlocks++;
182                 rs->rs_locks[idx] = *lock;
183                 rs->rs_modes[idx] = mode;
184                 rs->rs_difficult = 1;
185                 rs->rs_no_ack = !!no_ack;
186         }
187 }
188
189 #ifdef __KERNEL__
190
191 #define HRT_RUNNING 0
192 #define HRT_STOPPING 1
193
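/* Per-thread state for a reply handling ("hr") thread: a queue of reply
 * states protected by hrt_lock and a wait queue used to wake the thread. */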
194 struct ptlrpc_hr_thread {
195         cfs_spinlock_t        hrt_lock;
196         unsigned long         hrt_flags;
197         cfs_waitq_t           hrt_wait;
198         cfs_list_t            hrt_queue;
199         cfs_completion_t      hrt_completion;
200 };
201
202 struct ptlrpc_hr_service {
203         int                     hr_index;
204         int                     hr_n_threads;
205         int                     hr_size;
206         struct ptlrpc_hr_thread hr_threads[0];
207 };
208
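/* A batch of reply states belonging to a single service, accumulated under
 * that service's srv_rs_lock and dispatched to one reply handling thread at
 * a time. */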
209 struct rs_batch {
210         cfs_list_t              rsb_replies;
211         struct ptlrpc_service  *rsb_svc;
212         unsigned int            rsb_n_replies;
213 };
214
215 /**
216  * A pointer to the per-node reply handling service.
217  */
218 static struct ptlrpc_hr_service *ptlrpc_hr = NULL;
219
220 /**
221  * Maximum number of replies scheduled in one batch.
222  */
223 #define MAX_SCHEDULED 256
224
225 /**
226  * Initialize a reply batch.
227  *
228  * \param b batch
229  */
230 static void rs_batch_init(struct rs_batch *b)
231 {
232         memset(b, 0, sizeof *b);
233         CFS_INIT_LIST_HEAD(&b->rsb_replies);
234 }
235
236 /**
237  * Choose an hr thread to dispatch requests to.
238  */
239 static unsigned int get_hr_thread_index(struct ptlrpc_hr_service *hr)
240 {
241         unsigned int idx;
242
243         /* Concurrent modification of hr_index w/o any spinlock
244            protection is harmless as long as the result fits the
245            [0..(hr_n_threads-1)] range and each thread gets a near-equal
246            load. */
247         idx = hr->hr_index;
248         hr->hr_index = (idx >= hr->hr_n_threads - 1) ? 0 : idx + 1;
249         return idx;
250 }
251
252 /**
253  * Dispatch all replies accumulated in the batch to one of the
254  * dedicated reply handling threads.
255  *
256  * \param b batch
257  */
258 static void rs_batch_dispatch(struct rs_batch *b)
259 {
260         if (b->rsb_n_replies != 0) {
261                 struct ptlrpc_hr_service *hr = ptlrpc_hr;
262                 int idx;
263
264                 idx = get_hr_thread_index(hr);
265
266                 cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
267                 cfs_list_splice_init(&b->rsb_replies,
268                                      &hr->hr_threads[idx].hrt_queue);
269                 cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
270                 cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
271                 b->rsb_n_replies = 0;
272         }
273 }
274
275 /**
276  * Add a reply to a batch.
277  * Add one reply object to a batch; schedule the batched replies if the batch is full.
278  *
279  * \param b batch
280  * \param rs reply
281  */
282 static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
283 {
284         struct ptlrpc_service *svc = rs->rs_service;
285
286         if (svc != b->rsb_svc || b->rsb_n_replies >= MAX_SCHEDULED) {
287                 if (b->rsb_svc != NULL) {
288                         rs_batch_dispatch(b);
289                         cfs_spin_unlock(&b->rsb_svc->srv_rs_lock);
290                 }
291                 cfs_spin_lock(&svc->srv_rs_lock);
292                 b->rsb_svc = svc;
293         }
294         cfs_spin_lock(&rs->rs_lock);
295         rs->rs_scheduled_ever = 1;
296         if (rs->rs_scheduled == 0) {
297                 cfs_list_move(&rs->rs_list, &b->rsb_replies);
298                 rs->rs_scheduled = 1;
299                 b->rsb_n_replies++;
300         }
301         rs->rs_committed = 1;
302         cfs_spin_unlock(&rs->rs_lock);
303 }
304
305 /**
306  * Reply batch finalization.
307  * Dispatch remaining replies from the batch
308  * and release remaining spinlock.
309  *
310  * \param b batch
311  */
312 static void rs_batch_fini(struct rs_batch *b)
313 {
314         if (b->rsb_svc != NULL) {
315                 rs_batch_dispatch(b);
316                 cfs_spin_unlock(&b->rsb_svc->srv_rs_lock);
317         }
318 }
319
320 #define DECLARE_RS_BATCH(b)     struct rs_batch b
321
322 #else /* __KERNEL__ */
323
324 #define rs_batch_init(b)        do{}while(0)
325 #define rs_batch_fini(b)        do{}while(0)
326 #define rs_batch_add(b, r)      ptlrpc_schedule_difficult_reply(r)
327 #define DECLARE_RS_BATCH(b)
328
329 #endif /* __KERNEL__ */
330
331 /**
332  * Put the reply state into a queue for processing because we received
333  * an ACK from the client
334  */
335 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
336 {
337 #ifdef __KERNEL__
338         struct ptlrpc_hr_service *hr = ptlrpc_hr;
339         int idx;
340         ENTRY;
341
342         LASSERT(cfs_list_empty(&rs->rs_list));
343
344         idx = get_hr_thread_index(hr);
345         cfs_spin_lock(&hr->hr_threads[idx].hrt_lock);
346         cfs_list_add_tail(&rs->rs_list, &hr->hr_threads[idx].hrt_queue);
347         cfs_spin_unlock(&hr->hr_threads[idx].hrt_lock);
348         cfs_waitq_signal(&hr->hr_threads[idx].hrt_wait);
349         EXIT;
350 #else
351         cfs_list_add_tail(&rs->rs_list, &rs->rs_service->srv_reply_queue);
352 #endif
353 }
354
355 void
356 ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
357 {
358         ENTRY;
359
360         LASSERT_SPIN_LOCKED(&rs->rs_service->srv_rs_lock);
361         LASSERT_SPIN_LOCKED(&rs->rs_lock);
362         LASSERT (rs->rs_difficult);
363         rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
364
365         if (rs->rs_scheduled) {     /* being set up or already notified */
366                 EXIT;
367                 return;
368         }
369
370         rs->rs_scheduled = 1;
371         cfs_list_del_init(&rs->rs_list);
372         ptlrpc_dispatch_difficult_reply(rs);
373         EXIT;
374 }
375
376 void ptlrpc_commit_replies(struct obd_export *exp)
377 {
378         struct ptlrpc_reply_state *rs, *nxt;
379         DECLARE_RS_BATCH(batch);
380         ENTRY;
381
382         rs_batch_init(&batch);
383         /* Find any replies that have been committed and get their service
384          * to attend to complete them. */
385
386         /* CAVEAT EMPTOR: spinlock ordering!!! */
387         cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
388         cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
389                                      rs_obd_list) {
390                 LASSERT (rs->rs_difficult);
391                 /* VBR: per-export last_committed */
392                 LASSERT(rs->rs_export);
393                 if (rs->rs_transno <= exp->exp_last_committed) {
394                         cfs_list_del_init(&rs->rs_obd_list);
395                         rs_batch_add(&batch, rs);
396                 }
397         }
398         cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
399         rs_batch_fini(&batch);
400         EXIT;
401 }
402
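/* Post every rqbd on the service's idle list to the network. Returns 1 if
 * at least one buffer was posted, 0 if the idle list was empty, or -1 if a
 * posting attempt failed (the failed rqbd is put back on the idle list). */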
403 static int
404 ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
405 {
406         struct ptlrpc_request_buffer_desc *rqbd;
407         int                                rc;
408         int                                posted = 0;
409
410         for (;;) {
411                 cfs_spin_lock(&svc->srv_lock);
412
413                 if (cfs_list_empty (&svc->srv_idle_rqbds)) {
414                         cfs_spin_unlock(&svc->srv_lock);
415                         return (posted);
416                 }
417
418                 rqbd = cfs_list_entry(svc->srv_idle_rqbds.next,
419                                       struct ptlrpc_request_buffer_desc,
420                                       rqbd_list);
421                 cfs_list_del (&rqbd->rqbd_list);
422
423                 /* assume we will post successfully */
424                 svc->srv_nrqbd_receiving++;
425                 cfs_list_add (&rqbd->rqbd_list, &svc->srv_active_rqbds);
426
427                 cfs_spin_unlock(&svc->srv_lock);
428
429                 rc = ptlrpc_register_rqbd(rqbd);
430                 if (rc != 0)
431                         break;
432
433                 posted = 1;
434         }
435
436         cfs_spin_lock(&svc->srv_lock);
437
438         svc->srv_nrqbd_receiving--;
439         cfs_list_del(&rqbd->rqbd_list);
440         cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
441
442         /* Don't complain if no request buffers are posted right now; LNET
443          * won't drop requests because we set the portal lazy! */
444
445         cfs_spin_unlock(&svc->srv_lock);
446
447         return (-1);
448 }
449
450 /**
451  * Start a service with parameters from struct ptlrpc_service_conf \a c
452  * as opposed to directly calling ptlrpc_init_svc with tons of arguments.
453  */
454 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
455                                             svc_handler_t h, char *name,
456                                             struct proc_dir_entry *proc_entry,
457                                             svc_req_printfn_t prntfn,
458                                             char *threadname)
459 {
460         return ptlrpc_init_svc(c->psc_nbufs, c->psc_bufsize,
461                                c->psc_max_req_size, c->psc_max_reply_size,
462                                c->psc_req_portal, c->psc_rep_portal,
463                                c->psc_watchdog_factor,
464                                h, name, proc_entry,
465                                prntfn, c->psc_min_threads, c->psc_max_threads,
466                                threadname, c->psc_ctx_tags, NULL);
467 }
468 EXPORT_SYMBOL(ptlrpc_init_svc_conf);
469
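/* Adaptive timeout timer callback: record the check time, flag that the
 * AT list needs checking, and wake up the service threads. */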
470 static void ptlrpc_at_timer(unsigned long castmeharder)
471 {
472         struct ptlrpc_service *svc = (struct ptlrpc_service *)castmeharder;
473         svc->srv_at_check = 1;
474         svc->srv_at_checktime = cfs_time_current();
475         cfs_waitq_signal(&svc->srv_waitq);
476 }
477
478 /**
479  * Initialize service on a given portal.
480  * This includes starting service threads, allocating and posting rqbds, and
481  * so on.
482  * \a nbufs is how many buffers to post
483  * \a bufsize is the buffer size to post
484  * \a max_req_size - maximum request size to be accepted for this service
485  * \a max_reply_size - maximum reply size this service can ever send
486  * \a req_portal - portal to listen on for requests
487  * \a rep_portal - portal to send replies to
488  * \a watchdog_factor - soft watchdog timeout multiplier used to print stuck service traces
489  * \a handler - function to process every new request
490  * \a name - service name
491  * \a proc_entry - entry in the /proc tree for statistics reporting
492  * \a min_threads \a max_threads - min/max number of service threads to start
493  * \a threadname - should be 11 characters or less; 3 will be added on
494  * \a hp_handler - function to determine the priority of the request, also called
495  *                 on every new request.
496  */
497 struct ptlrpc_service *
498 ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
499                 int req_portal, int rep_portal, int watchdog_factor,
500                 svc_handler_t handler, char *name,
501                 cfs_proc_dir_entry_t *proc_entry,
502                 svc_req_printfn_t svcreq_printfn,
503                 int min_threads, int max_threads,
504                 char *threadname, __u32 ctx_tags,
505                 svc_hpreq_handler_t hp_handler)
506 {
507         int                     rc;
508         struct ptlrpc_at_array *array;
509         struct ptlrpc_service  *service;
510         unsigned int            size, index;
511         ENTRY;
512
513         LASSERT (nbufs > 0);
514         LASSERT (bufsize >= max_req_size + SPTLRPC_MAX_PAYLOAD);
515         LASSERT (ctx_tags != 0);
516
517         OBD_ALLOC_PTR(service);
518         if (service == NULL)
519                 RETURN(NULL);
520
521         /* First initialise enough for early teardown */
522
523         service->srv_name = name;
524         cfs_spin_lock_init(&service->srv_lock);
525         cfs_spin_lock_init(&service->srv_rq_lock);
526         cfs_spin_lock_init(&service->srv_rs_lock);
527         CFS_INIT_LIST_HEAD(&service->srv_threads);
528         cfs_waitq_init(&service->srv_waitq);
529
530         service->srv_nbuf_per_group = test_req_buffer_pressure ? 1 : nbufs;
531         service->srv_max_req_size = max_req_size + SPTLRPC_MAX_PAYLOAD;
532         service->srv_buf_size = bufsize;
533         service->srv_rep_portal = rep_portal;
534         service->srv_req_portal = req_portal;
535         service->srv_watchdog_factor = watchdog_factor;
536         service->srv_handler = handler;
537         service->srv_req_printfn = svcreq_printfn;
538         service->srv_request_seq = 1;           /* valid seq #s start at 1 */
539         service->srv_request_max_cull_seq = 0;
540         service->srv_threads_min = min_threads;
541         service->srv_threads_max = max_threads;
542         service->srv_thread_name = threadname;
543         service->srv_ctx_tags = ctx_tags;
544         service->srv_hpreq_handler = hp_handler;
545         service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
546         service->srv_hpreq_count = 0;
547         service->srv_n_active_hpreq = 0;
548
549         rc = LNetSetLazyPortal(service->srv_req_portal);
550         LASSERT (rc == 0);
551
552         CFS_INIT_LIST_HEAD(&service->srv_request_queue);
553         CFS_INIT_LIST_HEAD(&service->srv_request_hpq);
554         CFS_INIT_LIST_HEAD(&service->srv_idle_rqbds);
555         CFS_INIT_LIST_HEAD(&service->srv_active_rqbds);
556         CFS_INIT_LIST_HEAD(&service->srv_history_rqbds);
557         CFS_INIT_LIST_HEAD(&service->srv_request_history);
558         CFS_INIT_LIST_HEAD(&service->srv_active_replies);
559 #ifndef __KERNEL__
560         CFS_INIT_LIST_HEAD(&service->srv_reply_queue);
561 #endif
562         CFS_INIT_LIST_HEAD(&service->srv_free_rs_list);
563         cfs_waitq_init(&service->srv_free_rs_waitq);
564         cfs_atomic_set(&service->srv_n_difficult_replies, 0);
565
566         cfs_spin_lock_init(&service->srv_at_lock);
567         CFS_INIT_LIST_HEAD(&service->srv_req_in_queue);
568
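        /* The adaptive timeout array is a ring of request lists indexed by
         * deadline (in seconds) modulo the array size, which is derived from
         * the maximum adaptive timeout (at_max). */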
569         array = &service->srv_at_array;
570         size = at_est2timeout(at_max);
571         array->paa_size = size;
572         array->paa_count = 0;
573         array->paa_deadline = -1;
574
575         /* allocate memory for srv_at_array (ptlrpc_at_array) */
576         OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
577         if (array->paa_reqs_array == NULL)
578                 GOTO(failed, NULL);
579
580         for (index = 0; index < size; index++)
581                 CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
582
583         OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
584         if (array->paa_reqs_count == NULL)
585                 GOTO(failed, NULL);
586
587         cfs_timer_init(&service->srv_at_timer, ptlrpc_at_timer, service);
588         /* At SOW, service time should be quick; 10s seems generous. If client
589            timeout is less than this, we'll be sending an early reply. */
590         at_init(&service->srv_at_estimate, 10, 0);
591
592         cfs_spin_lock (&ptlrpc_all_services_lock);
593         cfs_list_add (&service->srv_list, &ptlrpc_all_services);
594         cfs_spin_unlock (&ptlrpc_all_services_lock);
595
596         /* Now allocate the request buffers */
597         rc = ptlrpc_grow_req_bufs(service);
598         /* We shouldn't be under memory pressure at startup, so
599          * fail if we can't post all our buffers at this time. */
600         if (rc != 0)
601                 GOTO(failed, NULL);
602
603         /* Now allocate pool of reply buffers */
604         /* Increase max reply size to next power of two */
605         service->srv_max_reply_size = 1;
606         while (service->srv_max_reply_size <
607                max_reply_size + SPTLRPC_MAX_PAYLOAD)
608                 service->srv_max_reply_size <<= 1;
609
610         if (proc_entry != NULL)
611                 ptlrpc_lprocfs_register_service(proc_entry, service);
612
613         CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
614                service->srv_name, service->srv_req_portal);
615
616         RETURN(service);
617 failed:
618         ptlrpc_unregister_service(service);
619         return NULL;
620 }
621
622 /**
623  * Actually free the request; must be called without holding svc_lock.
624  * Note it is the caller's responsibility to unlink req->rq_list.
625  */
626 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
627 {
628         LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
629         LASSERT(cfs_list_empty(&req->rq_timed_list));
630
631          /* DEBUG_REQ() assumes the reply state of a request with a valid
632           * ref will not be destroyed until that reference is dropped. */
633         ptlrpc_req_drop_rs(req);
634
635         sptlrpc_svc_ctx_decref(req);
636
637         if (req != &req->rq_rqbd->rqbd_req) {
638                 /* NB request buffers use an embedded
639                  * req if the incoming req unlinked the
640                  * MD; this isn't one of them! */
641                 OBD_FREE(req, sizeof(*req));
642         }
643 }
644
645 /**
646  * Drop a reference to the request. If it reaches 0, we either
647  * put it into the history list or free it immediately.
648  */
649 void ptlrpc_server_drop_request(struct ptlrpc_request *req)
650 {
651         struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
652         struct ptlrpc_service             *svc = rqbd->rqbd_service;
653         int                                refcount;
654         cfs_list_t                        *tmp;
655         cfs_list_t                        *nxt;
656
657         if (!cfs_atomic_dec_and_test(&req->rq_refcount))
658                 return;
659
660         cfs_spin_lock(&svc->srv_at_lock);
661         if (req->rq_at_linked) {
662                 struct ptlrpc_at_array *array = &svc->srv_at_array;
663                 __u32 index = req->rq_at_index;
664
665                 LASSERT(!cfs_list_empty(&req->rq_timed_list));
666                 cfs_list_del_init(&req->rq_timed_list);
667                 cfs_spin_lock(&req->rq_lock);
668                 req->rq_at_linked = 0;
669                 cfs_spin_unlock(&req->rq_lock);
670                 array->paa_reqs_count[index]--;
671                 array->paa_count--;
672         } else
673                 LASSERT(cfs_list_empty(&req->rq_timed_list));
674         cfs_spin_unlock(&svc->srv_at_lock);
675
676         /* finalize request */
677         if (req->rq_export) {
678                 class_export_put(req->rq_export);
679                 req->rq_export = NULL;
680         }
681
682         cfs_spin_lock(&svc->srv_lock);
683
684         cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
685
686         refcount = --(rqbd->rqbd_refcount);
687         if (refcount == 0) {
688                 /* request buffer is now idle: add to history */
689                 cfs_list_del(&rqbd->rqbd_list);
690                 cfs_list_add_tail(&rqbd->rqbd_list, &svc->srv_history_rqbds);
691                 svc->srv_n_history_rqbds++;
692
693                 /* cull some history?
694                  * I expect only about 1 or 2 rqbds need to be recycled here */
695                 while (svc->srv_n_history_rqbds > svc->srv_max_history_rqbds) {
696                         rqbd = cfs_list_entry(svc->srv_history_rqbds.next,
697                                               struct ptlrpc_request_buffer_desc,
698                                               rqbd_list);
699
700                         cfs_list_del(&rqbd->rqbd_list);
701                         svc->srv_n_history_rqbds--;
702
703                         /* remove rqbd's reqs from svc's req history while
704                          * I've got the service lock */
705                         cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
706                                 req = cfs_list_entry(tmp, struct ptlrpc_request,
707                                                      rq_list);
708                                 /* Track the highest culled req seq */
709                                 if (req->rq_history_seq >
710                                     svc->srv_request_max_cull_seq)
711                                         svc->srv_request_max_cull_seq =
712                                                 req->rq_history_seq;
713                                 cfs_list_del(&req->rq_history_list);
714                         }
715
716                         cfs_spin_unlock(&svc->srv_lock);
717
718                         cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
719                                 req = cfs_list_entry(rqbd->rqbd_reqs.next,
720                                                      struct ptlrpc_request,
721                                                      rq_list);
722                                 cfs_list_del(&req->rq_list);
723                                 ptlrpc_server_free_request(req);
724                         }
725
726                         cfs_spin_lock(&svc->srv_lock);
727                         /*
728                          * now all reqs, including the embedded req, have been
729                          * disposed of; schedule the request buffer for re-use.
730                          */
731                         LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
732                                 0);
733                         cfs_list_add_tail(&rqbd->rqbd_list,
734                                           &svc->srv_idle_rqbds);
735                 }
736
737                 cfs_spin_unlock(&svc->srv_lock);
738         } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
739                 /* If we are low on memory, we are not interested in history */
740                 cfs_list_del(&req->rq_list);
741                 cfs_list_del_init(&req->rq_history_list);
742                 cfs_spin_unlock(&svc->srv_lock);
743
744                 ptlrpc_server_free_request(req);
745         } else {
746                 cfs_spin_unlock(&svc->srv_lock);
747         }
748 }
749
750 /**
751  * Finish a request: stop sending more early replies and release
752  * the request. Should be called after we have finished handling the request.
753  */
754 static void ptlrpc_server_finish_request(struct ptlrpc_service *svc,
755                                          struct ptlrpc_request *req)
756 {
757         cfs_spin_lock(&svc->srv_rq_lock);
758         svc->srv_n_active_reqs--;
759         if (req->rq_hp)
760                 svc->srv_n_active_hpreq--;
761         cfs_spin_unlock(&svc->srv_rq_lock);
762
763         ptlrpc_server_drop_request(req);
764 }
765
766 /**
767  * This function makes sure dead exports are evicted in a timely manner.
768  * This function is only called when some export receives a message (i.e.,
769  * the network is up).
770  */
771 static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
772 {
773         struct obd_export *oldest_exp;
774         time_t oldest_time, new_time;
775
776         ENTRY;
777
778         LASSERT(exp);
779
780         /* Compensate for slow machines, etc, by faking our request time
781            into the future.  Although this can break the strict time-ordering
782            of the list, we can be really lazy here - we don't have to evict
783            at the exact right moment.  Eventually, all silent exports
784            will make it to the top of the list. */
785
786         /* Ignore renewals of 1 second or less. */
787         new_time = cfs_time_current_sec() + extra_delay;
788         if (exp->exp_last_request_time + 1 /*second */ >= new_time)
789                 RETURN_EXIT;
790
791         exp->exp_last_request_time = new_time;
792         CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
793                exp->exp_client_uuid.uuid,
794                exp->exp_last_request_time, exp);
795
796         /* exports may get disconnected from the chain even though the
797            export has references, so we must keep the spin lock while
798            manipulating the lists */
799         cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
800
801         if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
802                 /* this one is not timed */
803                 cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
804                 RETURN_EXIT;
805         }
806
807         cfs_list_move_tail(&exp->exp_obd_chain_timed,
808                            &exp->exp_obd->obd_exports_timed);
809
810         oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
811                                     struct obd_export, exp_obd_chain_timed);
812         oldest_time = oldest_exp->exp_last_request_time;
813         cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
814
815         if (exp->exp_obd->obd_recovering) {
816                 /* be nice to everyone during recovery */
817                 EXIT;
818                 return;
819         }
820
821         /* Note - racing to start/reset the obd_eviction timer is safe */
822         if (exp->exp_obd->obd_eviction_timer == 0) {
823                 /* Check if the oldest entry is expired. */
824                 if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
825                                               extra_delay)) {
826                         /* We need a second timer, in case the net was down and
827                          * it just came back. Since the pinger may skip every
828                          * other PING_INTERVAL (see note in ptlrpc_pinger_main),
829                          * we better wait for 3. */
830                         exp->exp_obd->obd_eviction_timer =
831                                 cfs_time_current_sec() + 3 * PING_INTERVAL;
832                         CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
833                                exp->exp_obd->obd_name, 
834                                obd_export_nid2str(oldest_exp), oldest_time);
835                 }
836         } else {
837                 if (cfs_time_current_sec() >
838                     (exp->exp_obd->obd_eviction_timer + extra_delay)) {
839                         /* The evictor won't evict anyone who we've heard from
840                          * recently, so we don't have to check before we start
841                          * it. */
842                         if (!ping_evictor_wake(exp))
843                                 exp->exp_obd->obd_eviction_timer = 0;
844                 }
845         }
846
847         EXIT;
848 }
849
850 /**
851  * Sanity check request \a req.
852  * Return 0 if all is ok, error code otherwise.
853  */
854 static int ptlrpc_check_req(struct ptlrpc_request *req)
855 {
856         if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
857                      req->rq_export->exp_conn_cnt)) {
858                 DEBUG_REQ(D_ERROR, req,
859                           "DROPPING req from old connection %d < %d",
860                           lustre_msg_get_conn_cnt(req->rq_reqmsg),
861                           req->rq_export->exp_conn_cnt);
862                 return -EEXIST;
863         }
864         if (unlikely(req->rq_export->exp_obd &&
865                      req->rq_export->exp_obd->obd_fail)) {
866              /* Failing over, don't handle any more reqs, send
867                 error response instead. */
868                 CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
869                        req, req->rq_export->exp_obd->obd_name);
870                 req->rq_status = -ENODEV;
871                 ptlrpc_error(req);
872                 return -ENODEV;
873         }
874
875         return 0;
876 }
877
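/* Arm (or disarm) the adaptive timeout timer so that it fires
 * at_early_margin seconds before the earliest deadline in the AT array;
 * if that moment has already passed, run the timer callback directly. */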
878 static void ptlrpc_at_set_timer(struct ptlrpc_service *svc)
879 {
880         struct ptlrpc_at_array *array = &svc->srv_at_array;
881         __s32 next;
882
883         cfs_spin_lock(&svc->srv_at_lock);
884         if (array->paa_count == 0) {
885                 cfs_timer_disarm(&svc->srv_at_timer);
886                 cfs_spin_unlock(&svc->srv_at_lock);
887                 return;
888         }
889
890         /* Set timer for closest deadline */
891         next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
892                        at_early_margin);
893         if (next <= 0)
894                 ptlrpc_at_timer((unsigned long)svc);
895         else
896                 cfs_timer_arm(&svc->srv_at_timer, cfs_time_shift(next));
897         cfs_spin_unlock(&svc->srv_at_lock);
898         CDEBUG(D_INFO, "armed %s at %+ds\n", svc->srv_name, next);
899 }
900
901 /* Add rpc to early reply check list */
902 static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
903 {
904         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
905         struct ptlrpc_request *rq = NULL;
906         struct ptlrpc_at_array *array = &svc->srv_at_array;
907         __u32 index;
908         int found = 0;
909
910         if (AT_OFF)
911                 return(0);
912
913         if (req->rq_no_reply)
914                 return 0;
915
916         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
917                 return(-ENOSYS);
918
919         cfs_spin_lock(&svc->srv_at_lock);
920         LASSERT(cfs_list_empty(&req->rq_timed_list));
921
922         index = (unsigned long)req->rq_deadline % array->paa_size;
923         if (array->paa_reqs_count[index] > 0) {
924                 /* latest rpcs will have the latest deadlines in the list,
925                  * so search backward. */
926                 cfs_list_for_each_entry_reverse(rq,
927                                                 &array->paa_reqs_array[index],
928                                                 rq_timed_list) {
929                         if (req->rq_deadline >= rq->rq_deadline) {
930                                 cfs_list_add(&req->rq_timed_list,
931                                              &rq->rq_timed_list);
932                                 break;
933                         }
934                 }
935         }
936
937         /* Add the request at the head of the list */
938         if (cfs_list_empty(&req->rq_timed_list))
939                 cfs_list_add(&req->rq_timed_list,
940                              &array->paa_reqs_array[index]);
941
942         cfs_spin_lock(&req->rq_lock);
943         req->rq_at_linked = 1;
944         cfs_spin_unlock(&req->rq_lock);
945         req->rq_at_index = index;
946         array->paa_reqs_count[index]++;
947         array->paa_count++;
948         if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
949                 array->paa_deadline = req->rq_deadline;
950                 found = 1;
951         }
952         cfs_spin_unlock(&svc->srv_at_lock);
953
954         if (found)
955                 ptlrpc_at_set_timer(svc);
956
957         return 0;
958 }
959
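/* Send an "early reply" asking the client to extend its RPC timeout.
 * On success the request's own deadline is moved out to match the new
 * deadline reported to the client. */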
960 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
961 {
962         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
963         struct ptlrpc_request *reqcopy;
964         struct lustre_msg *reqmsg;
965         cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
966         time_t newdl;
967         int rc;
968         ENTRY;
969
970         /* deadline is when the client expects us to reply, margin is the
971            difference between clients' and servers' expectations */
972         DEBUG_REQ(D_ADAPTTO, req,
973                   "%ssending early reply (deadline %+lds, margin %+lds) for "
974                   "%d+%d", AT_OFF ? "AT off - not " : "",
975                   olddl, olddl - at_get(&svc->srv_at_estimate),
976                   at_get(&svc->srv_at_estimate), at_extra);
977
978         if (AT_OFF)
979                 RETURN(0);
980
981         if (olddl < 0) {
982                 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
983                           "not sending early reply. Consider increasing "
984                           "at_early_margin (%d)?", olddl, at_early_margin);
985
986                 /* Return an error so we're not re-added to the timed list. */
987                 RETURN(-ETIMEDOUT);
988         }
989
990         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
991                 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
992                           "but no AT support");
993                 RETURN(-ENOSYS);
994         }
995
996         if (req->rq_export &&
997             lustre_msg_get_flags(req->rq_reqmsg) &
998             (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
999                 /* During recovery, we don't want to send too many early
1000                  * replies, but on the other hand we want to make sure the
1001                  * client has enough time to resend if the rpc is lost. So
1002                  * during the recovery period send at least 4 early replies,
1003                  * spacing them every at_extra if we can. at_estimate should
1004                  * always equal this fixed value during recovery. */
1005                 at_measured(&svc->srv_at_estimate, min(at_extra,
1006                             req->rq_export->exp_obd->obd_recovery_timeout / 4));
1007         } else {
1008                 /* Fake our processing time into the future to ask the clients
1009                  * for some extra amount of time */
1010                 at_measured(&svc->srv_at_estimate, at_extra +
1011                             cfs_time_current_sec() -
1012                             req->rq_arrival_time.tv_sec);
1013
1014                 /* Check to see if we've actually increased the deadline -
1015                  * we may be past adaptive_max */
1016                 if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
1017                     at_get(&svc->srv_at_estimate)) {
1018                         DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
1019                                   "(%ld/%ld), not sending early reply\n",
1020                                   olddl, req->rq_arrival_time.tv_sec +
1021                                   at_get(&svc->srv_at_estimate) -
1022                                   cfs_time_current_sec());
1023                         RETURN(-ETIMEDOUT);
1024                 }
1025         }
1026         newdl = cfs_time_current_sec() + at_get(&svc->srv_at_estimate);
1027
1028         OBD_ALLOC(reqcopy, sizeof *reqcopy);
1029         if (reqcopy == NULL)
1030                 RETURN(-ENOMEM);
1031         OBD_ALLOC(reqmsg, req->rq_reqlen);
1032         if (!reqmsg) {
1033                 OBD_FREE(reqcopy, sizeof *reqcopy);
1034                 RETURN(-ENOMEM);
1035         }
1036
1037         *reqcopy = *req;
1038         reqcopy->rq_reply_state = NULL;
1039         reqcopy->rq_rep_swab_mask = 0;
1040         reqcopy->rq_pack_bulk = 0;
1041         reqcopy->rq_pack_udesc = 0;
1042         reqcopy->rq_packed_final = 0;
1043         sptlrpc_svc_ctx_addref(reqcopy);
1044         /* We only need the reqmsg for the magic */
1045         reqcopy->rq_reqmsg = reqmsg;
1046         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
1047
1048         LASSERT(cfs_atomic_read(&req->rq_refcount));
1049         /** if it is last refcount then early reply isn't needed */
1050         if (cfs_atomic_read(&req->rq_refcount) == 1) {
1051                 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
1052                           "abort sending early reply\n");
1053                 GOTO(out, rc = -EINVAL);
1054         }
1055
1056         /* Connection ref */
1057         reqcopy->rq_export = class_conn2export(
1058                                      lustre_msg_get_handle(reqcopy->rq_reqmsg));
1059         if (reqcopy->rq_export == NULL)
1060                 GOTO(out, rc = -ENODEV);
1061
1062         /* RPC ref */
1063         class_export_rpc_get(reqcopy->rq_export);
1064         if (reqcopy->rq_export->exp_obd &&
1065             reqcopy->rq_export->exp_obd->obd_fail)
1066                 GOTO(out_put, rc = -ENODEV);
1067
1068         rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
1069         if (rc)
1070                 GOTO(out_put, rc);
1071
1072         rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
1073
1074         if (!rc) {
1075                 /* Adjust our own deadline to what we told the client */
1076                 req->rq_deadline = newdl;
1077                 req->rq_early_count++; /* number sent, server side */
1078         } else {
1079                 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
1080         }
1081
1082         /* Free the (early) reply state from lustre_pack_reply.
1083            (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
1084         ptlrpc_req_drop_rs(reqcopy);
1085
1086 out_put:
1087         class_export_rpc_put(reqcopy->rq_export);
1088         class_export_put(reqcopy->rq_export);
1089 out:
1090         sptlrpc_svc_ctx_decref(reqcopy);
1091         OBD_FREE(reqmsg, req->rq_reqlen);
1092         OBD_FREE(reqcopy, sizeof *reqcopy);
1093         RETURN(rc);
1094 }
1095
1096 /* Send early replies to everybody expiring within at_early_margin
1097    asking for at_extra time */
1098 static int ptlrpc_at_check_timed(struct ptlrpc_service *svc)
1099 {
1100         struct ptlrpc_request *rq, *n;
1101         cfs_list_t work_list;
1102         struct ptlrpc_at_array *array = &svc->srv_at_array;
1103         __u32  index, count;
1104         time_t deadline;
1105         time_t now = cfs_time_current_sec();
1106         cfs_duration_t delay;
1107         int first, counter = 0;
1108         ENTRY;
1109
1110         cfs_spin_lock(&svc->srv_at_lock);
1111         if (svc->srv_at_check == 0) {
1112                 cfs_spin_unlock(&svc->srv_at_lock);
1113                 RETURN(0);
1114         }
1115         delay = cfs_time_sub(cfs_time_current(), svc->srv_at_checktime);
1116         svc->srv_at_check = 0;
1117
1118         if (array->paa_count == 0) {
1119                 cfs_spin_unlock(&svc->srv_at_lock);
1120                 RETURN(0);
1121         }
1122
1123         /* The timer went off, but maybe the nearest rpc already completed. */
1124         first = array->paa_deadline - now;
1125         if (first > at_early_margin) {
1126                 /* We've still got plenty of time.  Reset the timer. */
1127                 cfs_spin_unlock(&svc->srv_at_lock);
1128                 ptlrpc_at_set_timer(svc);
1129                 RETURN(0);
1130         }
1131
1132         /* We're close to a timeout, and we don't know how much longer the
1133            server will take. Send early replies to everyone expiring soon. */
1134         CFS_INIT_LIST_HEAD(&work_list);
1135         deadline = -1;
1136         index = (unsigned long)array->paa_deadline % array->paa_size;
1137         count = array->paa_count;
1138         while (count > 0) {
1139                 count -= array->paa_reqs_count[index];
1140                 cfs_list_for_each_entry_safe(rq, n,
1141                                              &array->paa_reqs_array[index],
1142                                              rq_timed_list) {
1143                         if (rq->rq_deadline <= now + at_early_margin) {
1144                                 cfs_list_del_init(&rq->rq_timed_list);
1145                                 /**
1146                                  * ptlrpc_server_drop_request() may drop
1147                                  * refcount to 0 already. Let's check this and
1148                                  * don't add entry to work_list
1149                                  */
1150                                 if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
1151                                         cfs_list_add(&rq->rq_timed_list, &work_list);
1152                                 counter++;
1153                                 array->paa_reqs_count[index]--;
1154                                 array->paa_count--;
1155                                 cfs_spin_lock(&rq->rq_lock);
1156                                 rq->rq_at_linked = 0;
1157                                 cfs_spin_unlock(&rq->rq_lock);
1158                                 continue;
1159                         }
1160
1161                         /* update the earliest deadline */
1162                         if (deadline == -1 || rq->rq_deadline < deadline)
1163                                 deadline = rq->rq_deadline;
1164
1165                         break;
1166                 }
1167
1168                 if (++index >= array->paa_size)
1169                         index = 0;
1170         }
1171         array->paa_deadline = deadline;
1172         cfs_spin_unlock(&svc->srv_at_lock);
1173
1174         /* we have a new earliest deadline, restart the timer */
1175         ptlrpc_at_set_timer(svc);
1176
1177         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1178                "replies\n", first, at_extra, counter);
1179         if (first < 0) {
1180                 /* We're already past request deadlines before we even get a
1181                    chance to send early replies */
1182                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1183                               "request traffic (cpu-bound).\n", svc->srv_name);
1184                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1185                       "delay="CFS_DURATION_T"(jiff)\n",
1186                       counter, svc->srv_n_queued_reqs, svc->srv_n_active_reqs,
1187                       at_get(&svc->srv_at_estimate), delay);
1188         }
1189
1190         /* we took an additional refcount so entries can't be deleted from the
1191          * list; no locking is needed */
1192         while (!cfs_list_empty(&work_list)) {
1193                 rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
1194                                     rq_timed_list);
1195                 cfs_list_del_init(&rq->rq_timed_list);
1196
1197                 if (ptlrpc_at_send_early_reply(rq) == 0)
1198                         ptlrpc_at_add_timed(rq);
1199
1200                 ptlrpc_server_drop_request(rq);
1201         }
1202
1203         RETURN(0);
1204 }
1205
1206 /**
1207  * Put the request on the export list if the request may become
1208  * a high priority one.
1209  */
1210 static int ptlrpc_hpreq_init(struct ptlrpc_service *svc,
1211                              struct ptlrpc_request *req)
1212 {
1213         int rc;
1214         ENTRY;
1215
1216         if (svc->srv_hpreq_handler) {
1217                 rc = svc->srv_hpreq_handler(req);
1218                 if (rc)
1219                         RETURN(rc);
1220         }
1221         if (req->rq_export && req->rq_ops) {
1222                 cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
1223                 cfs_list_add(&req->rq_exp_list,
1224                              &req->rq_export->exp_queued_rpc);
1225                 cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1226         }
1227
1228         RETURN(0);
1229 }
1230
1231 /** Remove the request from the export list. */
1232 static void ptlrpc_hpreq_fini(struct ptlrpc_request *req)
1233 {
1234         ENTRY;
1235         if (req->rq_export && req->rq_ops) {
1236                 cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
1237                 cfs_list_del_init(&req->rq_exp_list);
1238                 cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1239         }
1240         EXIT;
1241 }
1242
1243 /**
1244  * Make the request a high priority one.
1245  *
1246  * All the high priority requests are queued in a separate FIFO
1247  * ptlrpc_service::srv_request_hpq list which is parallel to
1248  * ptlrpc_service::srv_request_queue list but has a higher priority
1249  * for handling.
1250  *
1251  * \see ptlrpc_server_handle_request().
1252  */
1253 static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service *svc,
1254                                         struct ptlrpc_request *req)
1255 {
1256         ENTRY;
1257         LASSERT(svc != NULL);
1258         cfs_spin_lock(&req->rq_lock);
1259         if (req->rq_hp == 0) {
1260                 int opc = lustre_msg_get_opc(req->rq_reqmsg);
1261
1262                 /* Add to the high priority queue. */
1263                 cfs_list_move_tail(&req->rq_list, &svc->srv_request_hpq);
1264                 req->rq_hp = 1;
1265                 if (opc != OBD_PING)
1266                         DEBUG_REQ(D_NET, req, "high priority req");
1267         }
1268         cfs_spin_unlock(&req->rq_lock);
1269         EXIT;
1270 }
1271
1272 /**
1273  * \see ptlrpc_hpreq_reorder_nolock
1274  */
1275 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
1276 {
1277         struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1278         ENTRY;
1279
1280         cfs_spin_lock(&svc->srv_rq_lock);
1281         /* It may happen that the request is already taken for processing
1282          * but still in the export list, do not re-add it into the HP list. */
1283         if (req->rq_phase == RQ_PHASE_NEW)
1284                 ptlrpc_hpreq_reorder_nolock(svc, req);
1285         cfs_spin_unlock(&svc->srv_rq_lock);
1286         EXIT;
1287 }
1288
1289 /** Check if the request is a high priority one. */
1290 static int ptlrpc_server_hpreq_check(struct ptlrpc_request *req)
1291 {
1292         int opc, rc = 0;
1293         ENTRY;
1294
1295         /* Check by request opc. */
1296         opc = lustre_msg_get_opc(req->rq_reqmsg);
1297         if (opc == OBD_PING)
1298                 RETURN(1);
1299
1300         /* Perform request specific check. */
1301         if (req->rq_ops && req->rq_ops->hpreq_check)
1302                 rc = req->rq_ops->hpreq_check(req);
1303         RETURN(rc);
1304 }
1305
1306 /** Add a request to the service queue; route it to the high priority queue if appropriate. */
1307 static int ptlrpc_server_request_add(struct ptlrpc_service *svc,
1308                                      struct ptlrpc_request *req)
1309 {
1310         int rc;
1311         ENTRY;
1312
1313         rc = ptlrpc_server_hpreq_check(req);
1314         if (rc < 0)
1315                 RETURN(rc);
1316
1317         cfs_spin_lock(&svc->srv_rq_lock);
1318         /* Before inserting the request into the queue, check if it is not
1319          * inserted yet, or even already handled -- it may happen due to
1320          * a racing ldlm_server_blocking_ast(). */
1321         if (req->rq_phase == RQ_PHASE_NEW && cfs_list_empty(&req->rq_list)) {
1322                 if (rc)
1323                         ptlrpc_hpreq_reorder_nolock(svc, req);
1324                 else
1325                         cfs_list_add_tail(&req->rq_list,
1326                                           &svc->srv_request_queue);
1327         }
1328         cfs_spin_unlock(&svc->srv_rq_lock);
1329
1330         RETURN(0);
1331 }
1332
1333 /**
1334  * Decide whether handling a high priority request is currently allowed.
1335  * Can be called without any lock, but the caller needs to hold
1336  * ptlrpc_service::srv_rq_lock to get a reliable result.
1337  */
1338 static int ptlrpc_server_allow_high(struct ptlrpc_service *svc, int force)
1339 {
1340         if (force)
1341                 return 1;
1342
1343         if (svc->srv_n_active_reqs >= svc->srv_threads_running - 1)
1344                 return 0;
1345
1346         return cfs_list_empty(&svc->srv_request_queue) ||
1347                svc->srv_hpreq_count < svc->srv_hpreq_ratio;
1348 }
1349
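/* Returns true if a high priority request is pending and we are currently
 * allowed to handle it. */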
1350 static int ptlrpc_server_high_pending(struct ptlrpc_service *svc, int force)
1351 {
1352         return ptlrpc_server_allow_high(svc, force) &&
1353                !cfs_list_empty(&svc->srv_request_hpq);
1354 }
1355
1356 /**
1357  * Only allow normal priority requests on a service that has a high-priority
1358  * queue if forced (i.e. cleanup), if there are other high priority requests
1359  * already being processed (i.e. those threads can service more high-priority
1360  * requests), or if there are enough idle threads that a later thread can do
1361  * a high priority request.
1362  * Can be called without any lock, but the caller needs to hold
1363  * ptlrpc_service::srv_rq_lock to get a reliable result.
1364  */
1365 static int ptlrpc_server_allow_normal(struct ptlrpc_service *svc, int force)
1366 {
1367 #ifndef __KERNEL__
1368         if (1) /* always allow to handle normal request for liblustre */
1369                 return 1;
1370 #endif
1371         if (force ||
1372             svc->srv_n_active_reqs < svc->srv_threads_running - 2)
1373                 return 1;
1374
1375         if (svc->srv_n_active_reqs >= svc->srv_threads_running - 1)
1376                 return 0;
1377
1378         return svc->srv_n_active_hpreq > 0 || svc->srv_hpreq_handler == NULL;
1379 }
1380
1381 static int ptlrpc_server_normal_pending(struct ptlrpc_service *svc, int force)
1382 {
1383         return ptlrpc_server_allow_normal(svc, force) &&
1384                !cfs_list_empty(&svc->srv_request_queue);
1385 }
1386
1387 /**
1388  * Returns true if there are requests available in the incoming
1389  * request queue for processing and it is allowed to fetch them.
1390  * Can be called without any lock, but the caller needs to hold
1391  * ptlrpc_service::srv_rq_lock to get a reliable result.
1392  * \see ptlrpc_server_allow_normal
1393  * \see ptlrpc_server_allow_high
1394  */
1395 static inline int
1396 ptlrpc_server_request_pending(struct ptlrpc_service *svc, int force)
1397 {
1398         return ptlrpc_server_high_pending(svc, force) ||
1399                ptlrpc_server_normal_pending(svc, force);
1400 }
1401
1402 /**
1403  * Fetch a request for processing from queue of unprocessed requests.
1404  * Favors high-priority requests.
1405  * Returns a pointer to fetched request.
1406  */
1407 static struct ptlrpc_request *
1408 ptlrpc_server_request_get(struct ptlrpc_service *svc, int force)
1409 {
1410         struct ptlrpc_request *req;
1411         ENTRY;
1412
1413         if (ptlrpc_server_high_pending(svc, force)) {
1414                 req = cfs_list_entry(svc->srv_request_hpq.next,
1415                                      struct ptlrpc_request, rq_list);
1416                 svc->srv_hpreq_count++;
1417                 RETURN(req);
1418
1419         }
1420
1421         if (ptlrpc_server_normal_pending(svc, force)) {
1422                 req = cfs_list_entry(svc->srv_request_queue.next,
1423                                      struct ptlrpc_request, rq_list);
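                /* taking a normal request resets the consecutive-HP counter
                 * that ptlrpc_server_allow_high() compares against
                 * svc->srv_hpreq_ratio */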
1424                 svc->srv_hpreq_count = 0;
1425                 RETURN(req);
1426         }
1427         RETURN(NULL);
1428 }
1429
1430 /**
1431  * Handle freshly incoming reqs: add them to the timed early reply list
1432  * and pass them on to the regular request queue.
1433  * All incoming requests pass through here before getting into
1434  * ptlrpc_server_handle_request later on.
1435  */
1436 static int
1437 ptlrpc_server_handle_req_in(struct ptlrpc_service *svc)
1438 {
1439         struct ptlrpc_request *req;
1440         __u32                  deadline;
1441         int                    rc;
1442         ENTRY;
1443
1444         LASSERT(svc);
1445
1446         cfs_spin_lock(&svc->srv_lock);
1447         if (cfs_list_empty(&svc->srv_req_in_queue)) {
1448                 cfs_spin_unlock(&svc->srv_lock);
1449                 RETURN(0);
1450         }
1451
1452         req = cfs_list_entry(svc->srv_req_in_queue.next,
1453                              struct ptlrpc_request, rq_list);
1454         cfs_list_del_init (&req->rq_list);
1455         svc->srv_n_queued_reqs--;
1456         /* Consider this still a "queued" request as far as stats are
1457            concerned */
1458         /* ptlrpc_hpreq_init() inserts it to the export list and by the time
1459          * of ptlrpc_server_request_add() it could be already handled and
1460          * released. To not lose request in between, take an extra reference
1461          * on the request. */
1462         ptlrpc_request_addref(req);
1463         cfs_spin_unlock(&svc->srv_lock);
1464
1465         /* go through security check/transform */
1466         rc = sptlrpc_svc_unwrap_request(req);
1467         switch (rc) {
1468         case SECSVC_OK:
1469                 break;
1470         case SECSVC_COMPLETE:
1471                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1472                 goto err_req;
1473         case SECSVC_DROP:
1474                 goto err_req;
1475         default:
1476                 LBUG();
1477         }
1478
1479         /*
1480          * for a null-flavored rpc the msg has already been unpacked by
1481          * sptlrpc, although redoing it wouldn't be harmful.
1482          */
1483         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1484                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1485                 if (rc != 0) {
1486                         CERROR("error unpacking request: ptl %d from %s "
1487                                "x"LPU64"\n", svc->srv_req_portal,
1488                                libcfs_id2str(req->rq_peer), req->rq_xid);
1489                         goto err_req;
1490                 }
1491         }
1492
1493         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1494         if (rc) {
1495                 CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
1496                         LPU64"\n", svc->srv_req_portal,
1497                         libcfs_id2str(req->rq_peer), req->rq_xid);
1498                 goto err_req;
1499         }
1500
1501         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1502             lustre_msg_get_opc(req->rq_reqmsg) == obd_fail_val) {
1503                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1504                        obd_fail_val, req->rq_xid);
1505                 goto err_req;
1506         }
1507
1508         rc = -EINVAL;
1509         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1510                 CERROR("wrong packet type received (type=%u) from %s\n",
1511                        lustre_msg_get_type(req->rq_reqmsg),
1512                        libcfs_id2str(req->rq_peer));
1513                 goto err_req;
1514         }
1515
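        /* flag requests whose opcodes involve bulk reads or writes */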
1516         switch(lustre_msg_get_opc(req->rq_reqmsg)) {
1517         case MDS_WRITEPAGE:
1518         case OST_WRITE:
1519                 req->rq_bulk_write = 1;
1520                 break;
1521         case MDS_READPAGE:
1522         case OST_READ:
1523                 req->rq_bulk_read = 1;
1524                 break;
1525         }
1526
1527         CDEBUG(D_NET, "got req "LPU64"\n", req->rq_xid);
1528
1529         req->rq_export = class_conn2export(
1530                 lustre_msg_get_handle(req->rq_reqmsg));
1531         if (req->rq_export) {
1532                 rc = ptlrpc_check_req(req);
1533                 if (rc == 0) {
1534                         rc = sptlrpc_target_export_check(req->rq_export, req);
1535                         if (rc)
1536                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1537                                           "illegal security flavor,");
1538                 }
1539
1540                 if (rc)
1541                         goto err_req;
1542                 ptlrpc_update_export_timer(req->rq_export, 0);
1543         }
1544
1545         /* req_in handling should/must be fast */
1546         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1547                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1548                           cfs_time_sub(cfs_time_current_sec(),
1549                                        req->rq_arrival_time.tv_sec));
1550
1551         /* Set rpc server deadline and add it to the timed list */
1552         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1553                     MSGHDR_AT_SUPPORT) ?
1554                    /* The max time the client expects us to take */
1555                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1556         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1557         if (unlikely(deadline == 0)) {
1558                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1559                 goto err_req;
1560         }
1561
1562         ptlrpc_at_add_timed(req);
1563         rc = ptlrpc_hpreq_init(svc, req);
1564         if (rc)
1565                 GOTO(err_req, rc);
1566
1567         /* Move it over to the request processing queue */
1568         rc = ptlrpc_server_request_add(svc, req);
1569         if (rc)
1570                 GOTO(err_req, rc);
1571         cfs_waitq_signal(&svc->srv_waitq);
1572         ptlrpc_server_drop_request(req);
1573         RETURN(1);
1574
1575 err_req:
1576         ptlrpc_server_drop_request(req);
1577         cfs_spin_lock(&svc->srv_rq_lock);
1578         svc->srv_n_active_reqs++;
1579         cfs_spin_unlock(&svc->srv_rq_lock);
1580         ptlrpc_server_finish_request(svc, req);
1581
1582         RETURN(1);
1583 }
1584
1585 /**
1586  * Main incoming request handling logic.
1587  * Calls handler function from service to do actual processing.
1588  */
1589 static int
1590 ptlrpc_server_handle_request(struct ptlrpc_service *svc,
1591                              struct ptlrpc_thread *thread)
1592 {
1593         struct obd_export     *export = NULL;
1594         struct ptlrpc_request *request;
1595         struct timeval         work_start;
1596         struct timeval         work_end;
1597         long                   timediff;
1598         int                    opc, rc;
1599         int                    fail_opc = 0;
1600         ENTRY;
1601
1602         LASSERT(svc);
1603
1604         cfs_spin_lock(&svc->srv_rq_lock);
1605 #ifndef __KERNEL__
1606         /* !@%$# liblustre only has 1 thread */
1607         if (cfs_atomic_read(&svc->srv_n_difficult_replies) != 0) {
1608                 cfs_spin_unlock(&svc->srv_rq_lock);
1609                 RETURN(0);
1610         }
1611 #endif
1612         request = ptlrpc_server_request_get(svc, 0);
1613         if (request == NULL) {
1614                 cfs_spin_unlock(&svc->srv_rq_lock);
1615                 RETURN(0);
1616         }
1617
1618         opc = lustre_msg_get_opc(request->rq_reqmsg);
1619         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
1620                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
1621         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
1622                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
1623
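        /* fault injection for testing: stall handling of a request that has
         * high-priority ops, then re-fetch because the queue may have changed
         * while srv_rq_lock was dropped */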
1624         if (unlikely(fail_opc)) {
1625                 if (request->rq_export && request->rq_ops) {
1626                         cfs_spin_unlock(&svc->srv_rq_lock);
1627                         OBD_FAIL_TIMEOUT(fail_opc, 4);
1628                         cfs_spin_lock(&svc->srv_rq_lock);
1629                         request = ptlrpc_server_request_get(svc, 0);
1630                         if (request == NULL) {
1631                                 cfs_spin_unlock(&svc->srv_rq_lock);
1632                                 RETURN(0);
1633                         }
1634                 }
1635         }
1636
1637         cfs_list_del_init(&request->rq_list);
1638         svc->srv_n_active_reqs++;
1639         if (request->rq_hp)
1640                 svc->srv_n_active_hpreq++;
1641
1642         /* The phase is changed under the lock here because we need to know
1643          * the request is under processing (see ptlrpc_hpreq_reorder()). */
1644         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
1645         cfs_spin_unlock(&svc->srv_rq_lock);
1646
1647         ptlrpc_hpreq_fini(request);
1648
1649         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
1650                 libcfs_debug_dumplog();
1651
1652         cfs_gettimeofday(&work_start);
1653         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
1654         if (likely(svc->srv_stats != NULL)) {
1655                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
1656                                     timediff);
1657                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
1658                                     svc->srv_n_queued_reqs);
1659                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
1660                                     svc->srv_n_active_reqs);
1661                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
1662                                     at_get(&svc->srv_at_estimate));
1663         }
1664
1665         rc = lu_context_init(&request->rq_session,
1666                              LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
1667         if (rc) {
1668                 CERROR("Failure to initialize session: %d\n", rc);
1669                 goto out_req;
1670         }
1671         request->rq_session.lc_thread = thread;
1672         request->rq_session.lc_cookie = 0x5;
1673         lu_context_enter(&request->rq_session);
1674
1675         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
1676
1677         request->rq_svc_thread = thread;
1678         if (thread)
1679                 request->rq_svc_thread->t_env->le_ses = &request->rq_session;
1680
1681         if (likely(request->rq_export)) {
1682                 if (unlikely(ptlrpc_check_req(request)))
1683                         goto put_conn;
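                /* timediff is in microseconds; the >> 19 (divide by 2^19,
                 * roughly half a million) is a cheap, rough conversion towards
                 * seconds for the export timer's extra-delay argument */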
1684                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
1685                 export = class_export_rpc_get(request->rq_export);
1686         }
1687
1688         /* Discard requests queued for longer than the deadline.
1689            The deadline is increased if we send an early reply. */
1690         if (cfs_time_current_sec() > request->rq_deadline) {
1691                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
1692                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
1693                           libcfs_id2str(request->rq_peer),
1694                           cfs_time_sub(request->rq_deadline,
1695                           request->rq_arrival_time.tv_sec),
1696                           cfs_time_sub(cfs_time_current_sec(),
1697                           request->rq_deadline));
1698                 goto put_rpc_export;
1699         }
1700
1701         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
1702                "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1703                (request->rq_export ?
1704                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1705                (request->rq_export ?
1706                 cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
1707                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
1708                libcfs_id2str(request->rq_peer),
1709                lustre_msg_get_opc(request->rq_reqmsg));
1710
1711         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
1712                 OBD_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, obd_fail_val);
1713
1714         rc = svc->srv_handler(request);
1715
1716         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
1717
1718 put_rpc_export:
1719         if (export != NULL)
1720                 class_export_rpc_put(export);
1721 put_conn:
1722         lu_context_exit(&request->rq_session);
1723         lu_context_fini(&request->rq_session);
1724
1725         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
1726                 DEBUG_REQ(D_WARNING, request, "Request x"LPU64" took longer "
1727                           "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
1728                           " client may timeout.",
1729                           request->rq_xid, cfs_time_sub(request->rq_deadline,
1730                           request->rq_arrival_time.tv_sec),
1731                           cfs_time_sub(cfs_time_current_sec(),
1732                           request->rq_deadline));
1733         }
1734
1735         cfs_gettimeofday(&work_end);
1736         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1737         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
1738                "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
1739                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
1740                 cfs_curproc_comm(),
1741                 (request->rq_export ?
1742                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1743                 (request->rq_export ?
1744                  cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
1745                 lustre_msg_get_status(request->rq_reqmsg),
1746                 request->rq_xid,
1747                 libcfs_id2str(request->rq_peer),
1748                 lustre_msg_get_opc(request->rq_reqmsg),
1749                 timediff,
1750                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
1751                 (request->rq_repmsg ?
1752                  lustre_msg_get_transno(request->rq_repmsg) :
1753                  request->rq_transno),
1754                 request->rq_status,
1755                 (request->rq_repmsg ?
1756                  lustre_msg_get_status(request->rq_repmsg) : -999));
1757         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
1758                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
1759                 int opc = opcode_offset(op);
1760                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
1761                         LASSERT(opc < LUSTRE_MAX_OPCODES);
1762                         lprocfs_counter_add(svc->srv_stats,
1763                                             opc + EXTRA_MAX_OPCODES,
1764                                             timediff);
1765                 }
1766         }
1767         if (unlikely(request->rq_early_count)) {
1768                 DEBUG_REQ(D_ADAPTTO, request,
1769                           "sent %d early replies before finishing in "
1770                           CFS_DURATION_T"s",
1771                           request->rq_early_count,
1772                           cfs_time_sub(work_end.tv_sec,
1773                           request->rq_arrival_time.tv_sec));
1774         }
1775
1776 out_req:
1777         ptlrpc_server_finish_request(svc, request);
1778
1779         RETURN(1);
1780 }
1781
1782 /**
1783  * An internal function to process a single reply state object.
1784  */
1785 static int
1786 ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
1787 {
1788         struct ptlrpc_service     *svc = rs->rs_service;
1789         struct obd_export         *exp;
1790         struct obd_device         *obd;
1791         int                        nlocks;
1792         int                        been_handled;
1793         ENTRY;
1794
1795         exp = rs->rs_export;
1796         obd = exp->exp_obd;
1797
1798         LASSERT (rs->rs_difficult);
1799         LASSERT (rs->rs_scheduled);
1800         LASSERT (cfs_list_empty(&rs->rs_list));
1801
1802         cfs_spin_lock (&exp->exp_lock);
1803         /* Noop if removed already */
1804         cfs_list_del_init (&rs->rs_exp_list);
1805         cfs_spin_unlock (&exp->exp_lock);
1806
1807         /* The disk commit callback holds exp_uncommitted_replies_lock while it
1808          * iterates over newly committed replies, removing them from
1809          * exp_uncommitted_replies.  It then drops this lock and schedules the
1810          * replies it found for handling here.
1811          *
1812          * We can avoid contention for exp_uncommitted_replies_lock between the
1813          * HRT threads and further commit callbacks by checking rs_committed
1814          * which is set in the commit callback while it holds both
1815          * rs_lock and exp_uncommitted_replies_lock.
1816          *
1817          * If we see rs_committed clear, the commit callback _may_ not have
1818          * handled this reply yet and we race with it to grab
1819          * exp_uncommitted_replies_lock before removing the reply from
1820          * exp_uncommitted_replies.  Note that if we lose the race and the
1821          * reply has already been removed, list_del_init() is a noop.
1822          *
1823          * If we see rs_committed set, we know the commit callback is handling,
1824          * or has handled this reply since store reordering might allow us to
1825          * see rs_committed set out of sequence.  But since this is done
1826          * holding rs_lock, we can be sure it has all completed once we hold
1827          * rs_lock, which we do right next.
1828          */
1829         if (!rs->rs_committed) {
1830                 cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
1831                 cfs_list_del_init(&rs->rs_obd_list);
1832                 cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
1833         }
1834
1835         cfs_spin_lock(&rs->rs_lock);
1836
1837         been_handled = rs->rs_handled;
1838         rs->rs_handled = 1;
1839
1840         nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
1841         rs->rs_nlocks = 0;                      /* locks still on rs_locks! */
1842
1843         if (nlocks == 0 && !been_handled) {
1844                 /* If we see this, we should already have seen the warning
1845                  * in mds_steal_ack_locks()  */
1846                 CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
1847                       " o%d NID %s\n",
1848                       rs,
1849                       rs->rs_xid, rs->rs_transno, rs->rs_opc,
1850                       libcfs_nid2str(exp->exp_connection->c_peer.nid));
1851         }
1852
1853         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
1854                 cfs_spin_unlock(&rs->rs_lock);
1855
1856                 if (!been_handled && rs->rs_on_net) {
1857                         LNetMDUnlink(rs->rs_md_h);
1858                         /* Ignore return code; we're racing with
1859                          * completion... */
1860                 }
1861
1862                 while (nlocks-- > 0)
1863                         ldlm_lock_decref(&rs->rs_locks[nlocks],
1864                                          rs->rs_modes[nlocks]);
1865
1866                 cfs_spin_lock(&rs->rs_lock);
1867         }
1868
1869         rs->rs_scheduled = 0;
1870
1871         if (!rs->rs_on_net) {
1872                 /* Off the net */
1873                 cfs_spin_unlock(&rs->rs_lock);
1874
1875                 class_export_put (exp);
1876                 rs->rs_export = NULL;
1877                 ptlrpc_rs_decref (rs);
1878                 if (cfs_atomic_dec_and_test(&svc->srv_n_difficult_replies) &&
1879                     svc->srv_is_stopping)
1880                         cfs_waitq_broadcast(&svc->srv_waitq);
1881                 RETURN(1);
1882         }
1883
1884         /* still on the net; callback will schedule */
1885         cfs_spin_unlock(&rs->rs_lock);
1886         RETURN(1);
1887 }
1888
1889 #ifndef __KERNEL__
1890
1891 /**
1892  * Check whether given service has a reply available for processing
1893  * and process it.
1894  *
1895  * \param svc a ptlrpc service
1896  * \retval 0 no replies processed
1897  * \retval 1 one reply processed
1898  */
1899 static int
1900 ptlrpc_server_handle_reply(struct ptlrpc_service *svc)
1901 {
1902         struct ptlrpc_reply_state *rs = NULL;
1903         ENTRY;
1904
1905         cfs_spin_lock(&svc->srv_rs_lock);
1906         if (!cfs_list_empty(&svc->srv_reply_queue)) {
1907                 rs = cfs_list_entry(svc->srv_reply_queue.prev,
1908                                     struct ptlrpc_reply_state,
1909                                     rs_list);
1910                 cfs_list_del_init(&rs->rs_list);
1911         }
1912         cfs_spin_unlock(&svc->srv_rs_lock);
1913         if (rs != NULL)
1914                 ptlrpc_handle_rs(rs);
1915         RETURN(rs != NULL);
1916 }
1917
1918 /* FIXME make use of timeout later */
1919 int
1920 liblustre_check_services (void *arg)
1921 {
1922         int  did_something = 0;
1923         int  rc;
1924         cfs_list_t *tmp, *nxt;
1925         ENTRY;
1926
1927         /* I'm relying on being single threaded, not to have to lock
1928          * ptlrpc_all_services etc */
1929         cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
1930                 struct ptlrpc_service *svc =
1931                         cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
1932
1933                 if (svc->srv_threads_running != 0)     /* I've recursed */
1934                         continue;
1935
1936                 /* service threads can block for bulk, so this limits us
1937                  * (arbitrarily) to recursing 1 stack frame per service.
1938                  * Note that the problem with recursion is that we have to
1939                  * unwind completely before our caller can resume. */
1940
1941                 svc->srv_threads_running++;
1942
1943                 do {
1944                         rc = ptlrpc_server_handle_req_in(svc);
1945                         rc |= ptlrpc_server_handle_reply(svc);
1946                         rc |= ptlrpc_at_check_timed(svc);
1947                         rc |= ptlrpc_server_handle_request(svc, NULL);
1948                         rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
1949                         did_something |= rc;
1950                 } while (rc);
1951
1952                 svc->srv_threads_running--;
1953         }
1954
1955         RETURN(did_something);
1956 }
1957 #define ptlrpc_stop_all_threads(s) do {} while (0)
1958
1959 #else /* __KERNEL__ */
1960
1961 static void
1962 ptlrpc_check_rqbd_pool(struct ptlrpc_service *svc)
1963 {
1964         int avail = svc->srv_nrqbd_receiving;
1965         int low_water = test_req_buffer_pressure ? 0 :
1966                         svc->srv_nbuf_per_group/2;
1967
1968         /* NB I'm not locking; just looking. */
1969
1970         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
1971          * allowed the request history to grow out of control.  We could put a
1972          * sanity check on that here and cull some history if we need the
1973          * space. */
1974
1975         if (avail <= low_water)
1976                 ptlrpc_grow_req_bufs(svc);
1977
1978         if (svc->srv_stats)
1979                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQBUF_AVAIL_CNTR,
1980                                     avail);
1981 }
1982
1983 static int
1984 ptlrpc_retry_rqbds(void *arg)
1985 {
1986         struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;
1987
1988         svc->srv_rqbd_timeout = 0;
1989         return (-ETIMEDOUT);
1990 }
1991
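/**
 * Check that enough service threads remain free: keep at least one spare
 * thread, plus one more when the service has a high-priority request handler.
 */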
1992 static inline int
1993 ptlrpc_threads_enough(struct ptlrpc_service *svc)
1994 {
1995         return svc->srv_n_active_reqs <
1996                svc->srv_threads_running - 1 - (svc->srv_hpreq_handler != NULL);
1997 }
1998
1999 /**
2000  * Check whether we are allowed to create more service threads.
2001  * Callers may invoke this without any lock, but must hold
2002  * ptlrpc_service::srv_lock to get a reliable result.
2003  */
2004 static inline int
2005 ptlrpc_threads_increasable(struct ptlrpc_service *svc)
2006 {
2007         return svc->srv_threads_running +
2008                svc->srv_threads_starting < svc->srv_threads_max;
2009 }
2010
2011 /**
2012  * too many requests in flight and we are still allowed to create more threads
2013  */
2014 static inline int
2015 ptlrpc_threads_need_create(struct ptlrpc_service *svc)
2016 {
2017         return !ptlrpc_threads_enough(svc) && ptlrpc_threads_increasable(svc);
2018 }
2019
2020 static inline int
2021 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2022 {
2023         return (thread->t_flags & SVC_STOPPING) != 0 ||
2024                 thread->t_svc->srv_is_stopping;
2025 }
2026
2027 static inline int
2028 ptlrpc_rqbd_pending(struct ptlrpc_service *svc)
2029 {
2030         return !cfs_list_empty(&svc->srv_idle_rqbds) &&
2031                svc->srv_rqbd_timeout == 0;
2032 }
2033
2034 static inline int
2035 ptlrpc_at_check(struct ptlrpc_service *svc)
2036 {
2037         return svc->srv_at_check;
2038 }
2039
2040 /**
2041  * Check whether any incoming requests are waiting for preprocessing.
2042  * Callers may invoke this without any lock, but must hold
2043  * ptlrpc_service::srv_lock to get a reliable result.
2044  */
2045 static inline int
2046 ptlrpc_server_request_waiting(struct ptlrpc_service *svc)
2047 {
2048         return !cfs_list_empty(&svc->srv_req_in_queue);
2049 }
2050
2051 static __attribute__((__noinline__)) int
2052 ptlrpc_wait_event(struct ptlrpc_service *svc,
2053                   struct ptlrpc_thread *thread)
2054 {
2055         /* Don't exit while there are replies to be handled */
2056         struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
2057                                              ptlrpc_retry_rqbds, svc);
2058
2059         lc_watchdog_disable(thread->t_watchdog);
2060
2061         cfs_cond_resched();
2062
2063         l_wait_event_exclusive_head(svc->srv_waitq,
2064                                ptlrpc_thread_stopping(thread) ||
2065                                ptlrpc_server_request_waiting(svc) ||
2066                                ptlrpc_server_request_pending(svc, 0) ||
2067                                ptlrpc_rqbd_pending(svc) ||
2068                                ptlrpc_at_check(svc), &lwi);
2069
2070         if (ptlrpc_thread_stopping(thread))
2071                 return -EINTR;
2072
2073         lc_watchdog_touch(thread->t_watchdog, CFS_GET_TIMEOUT(svc));
2074
2075         return 0;
2076 }
2077
2078 /**
2079  * Main thread body for service threads.
2080  * Waits in a loop for new requests to process.
2081  * Every time an incoming request is added to its queue, the waitq
2082  * is woken up and one of the threads will handle it.
2083  */
2084 static int ptlrpc_main(void *arg)
2085 {
2086         struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
2087         struct ptlrpc_service  *svc = data->svc;
2088         struct ptlrpc_thread   *thread = data->thread;
2089         struct ptlrpc_reply_state *rs;
2090 #ifdef WITH_GROUP_INFO
2091         cfs_group_info_t *ginfo = NULL;
2092 #endif
2093         struct lu_env env;
2094         int counter = 0, rc = 0;
2095         ENTRY;
2096
2097         thread->t_pid = cfs_curproc_pid();
2098         cfs_daemonize_ctxt(data->name);
2099
2100 #if defined(HAVE_NODE_TO_CPUMASK) && defined(CONFIG_NUMA)
2101         /* we need to do this before any per-thread allocation is done so that
2102          * we get the per-thread allocations on local node.  bug 7342 */
2103         if (svc->srv_cpu_affinity) {
2104                 int cpu, num_cpu;
2105
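                /* pick the (t_id mod #online)-th online CPU and bind this
                 * thread to that CPU's NUMA node */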
2106                 for (cpu = 0, num_cpu = 0; cpu < cfs_num_possible_cpus();
2107                      cpu++) {
2108                         if (!cfs_cpu_online(cpu))
2109                                 continue;
2110                         if (num_cpu == thread->t_id % cfs_num_online_cpus())
2111                                 break;
2112                         num_cpu++;
2113                 }
2114                 cfs_set_cpus_allowed(cfs_current(),
2115                                      node_to_cpumask(cpu_to_node(cpu)));
2116         }
2117 #endif
2118
2119 #ifdef WITH_GROUP_INFO
2120         ginfo = cfs_groups_alloc(0);
2121         if (!ginfo) {
2122                 rc = -ENOMEM;
2123                 goto out;
2124         }
2125
2126         cfs_set_current_groups(ginfo);
2127         cfs_put_group_info(ginfo);
2128 #endif
2129
2130         if (svc->srv_init != NULL) {
2131                 rc = svc->srv_init(thread);
2132                 if (rc)
2133                         goto out;
2134         }
2135
2136         rc = lu_context_init(&env.le_ctx,
2137                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2138         if (rc)
2139                 goto out_srv_fini;
2140
2141         thread->t_env = &env;
2142         env.le_ctx.lc_thread = thread;
2143         env.le_ctx.lc_cookie = 0x6;
2144
2145         /* Alloc reply state structure for this one */
2146         OBD_ALLOC_GFP(rs, svc->srv_max_reply_size, CFS_ALLOC_STD);
2147         if (!rs) {
2148                 rc = -ENOMEM;
2149                 goto out_srv_fini;
2150         }
2151
2152         cfs_spin_lock(&svc->srv_lock);
2153
2154         LASSERT((thread->t_flags & SVC_STARTING) != 0);
2155         thread->t_flags &= ~SVC_STARTING;
2156         svc->srv_threads_starting--;
2157
2158         /* SVC_STOPPING may already be set here if someone else is trying
2159          * to stop the service while this new thread has been dynamically
2160          * forked. We still set SVC_RUNNING to let our creator know that
2161          * we are now running, however we will exit as soon as possible */
2162         thread->t_flags |= SVC_RUNNING;
2163         svc->srv_threads_running++;
2164         cfs_spin_unlock(&svc->srv_lock);
2165
2166         /*
2167          * wake up our creator. Note: @data is invalid after this point,
2168          * because it's allocated on ptlrpc_start_thread() stack.
2169          */
2170         cfs_waitq_signal(&thread->t_ctl_waitq);
2171
2172         thread->t_watchdog = lc_watchdog_add(CFS_GET_TIMEOUT(svc), NULL, NULL);
2173
2174         cfs_spin_lock(&svc->srv_rs_lock);
2175         cfs_list_add(&rs->rs_list, &svc->srv_free_rs_list);
2176         cfs_waitq_signal(&svc->srv_free_rs_waitq);
2177         cfs_spin_unlock(&svc->srv_rs_lock);
2178
2179         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2180                svc->srv_threads_running);
2181
2182         /* XXX maintain a list of all managed devices: insert here */
2183         while (!ptlrpc_thread_stopping(thread)) {
2184                 if (ptlrpc_wait_event(svc, thread))
2185                         break;
2186
2187                 ptlrpc_check_rqbd_pool(svc);
2188
2189                 if (ptlrpc_threads_need_create(svc)) {
2190                         /* Ignore return code - we tried... */
2191                         ptlrpc_start_thread(svc);
2192                 }
2193
2194                 /* Process all incoming reqs before handling any */
2195                 if (ptlrpc_server_request_waiting(svc)) {
2196                         ptlrpc_server_handle_req_in(svc);
2197                         /* but limit ourselves in case of flood */
2198                         if (counter++ < 100)
2199                                 continue;
2200                         counter = 0;
2201                 }
2202
2203                 if (ptlrpc_at_check(svc))
2204                         ptlrpc_at_check_timed(svc);
2205
2206                 if (ptlrpc_server_request_pending(svc, 0)) {
2207                         lu_context_enter(&env.le_ctx);
2208                         ptlrpc_server_handle_request(svc, thread);
2209                         lu_context_exit(&env.le_ctx);
2210                 }
2211
2212                 if (ptlrpc_rqbd_pending(svc) &&
2213                     ptlrpc_server_post_idle_rqbds(svc) < 0) {
2214                         /* I just failed to repost request buffers.
2215                          * Wait for a timeout (unless something else
2216                          * happens) before I try again */
2217                         svc->srv_rqbd_timeout = cfs_time_seconds(1)/10;
2218                         CDEBUG(D_RPCTRACE,"Posted buffers: %d\n",
2219                                svc->srv_nrqbd_receiving);
2220                 }
2221         }
2222
2223         lc_watchdog_delete(thread->t_watchdog);
2224         thread->t_watchdog = NULL;
2225
2226 out_srv_fini:
2227         /*
2228          * deconstruct service specific state created by ptlrpc_start_thread()
2229          */
2230         if (svc->srv_done != NULL)
2231                 svc->srv_done(thread);
2232
2233         lu_context_fini(&env.le_ctx);
2234 out:
2235         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2236                thread, thread->t_pid, thread->t_id, rc);
2237
2238         cfs_spin_lock(&svc->srv_lock);
2239         if ((thread->t_flags & SVC_STARTING) != 0) {
2240                 svc->srv_threads_starting--;
2241                 thread->t_flags &= ~SVC_STARTING;
2242         }
2243
2244         if ((thread->t_flags & SVC_RUNNING) != 0) {
2245                 /* must know immediately */
2246                 svc->srv_threads_running--;
2247                 thread->t_flags &= ~SVC_RUNNING;
2248         }
2249
2250         thread->t_id    = rc;
2251         thread->t_flags |= SVC_STOPPED;
2252
2253         cfs_waitq_signal(&thread->t_ctl_waitq);
2254         cfs_spin_unlock(&svc->srv_lock);
2255
2256         return rc;
2257 }
2258
2259 struct ptlrpc_hr_args {
2260         int                       thread_index;
2261         int                       cpu_index;
2262         struct ptlrpc_hr_service *hrs;
2263 };
2264
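/**
 * Atomically move any queued reply states onto \a replies and report whether
 * the HR thread should stop sleeping (stop requested or work pending).
 */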
2265 static int hrt_dont_sleep(struct ptlrpc_hr_thread *t,
2266                           cfs_list_t *replies)
2267 {
2268         int result;
2269
2270         cfs_spin_lock(&t->hrt_lock);
2271         cfs_list_splice_init(&t->hrt_queue, replies);
2272         result = cfs_test_bit(HRT_STOPPING, &t->hrt_flags) ||
2273                 !cfs_list_empty(replies);
2274         cfs_spin_unlock(&t->hrt_lock);
2275         return result;
2276 }
2277
2278 /**
2279  * Main body of "handle reply" function.
2280  * It processes acked reply states
2281  */
2282 static int ptlrpc_hr_main(void *arg)
2283 {
2284         struct ptlrpc_hr_args * hr_args = arg;
2285         struct ptlrpc_hr_service *hr = hr_args->hrs;
2286         struct ptlrpc_hr_thread *t = &hr->hr_threads[hr_args->thread_index];
2287         char threadname[20];
2288         CFS_LIST_HEAD(replies);
2289
2290         snprintf(threadname, sizeof(threadname),
2291                  "ptlrpc_hr_%d", hr_args->thread_index);
2292
2293         cfs_daemonize_ctxt(threadname);
2294 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
2295         cfs_set_cpus_allowed(cfs_current(),
2296                              node_to_cpumask(cpu_to_node(hr_args->cpu_index)));
2297 #endif
2298         cfs_set_bit(HRT_RUNNING, &t->hrt_flags);
2299         cfs_waitq_signal(&t->hrt_wait);
2300
2301         while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
2302
2303                 l_wait_condition(t->hrt_wait, hrt_dont_sleep(t, &replies));
2304                 while (!cfs_list_empty(&replies)) {
2305                         struct ptlrpc_reply_state *rs;
2306
2307                         rs = cfs_list_entry(replies.prev,
2308                                             struct ptlrpc_reply_state,
2309                                             rs_list);
2310                         cfs_list_del_init(&rs->rs_list);
2311                         ptlrpc_handle_rs(rs);
2312                 }
2313         }
2314
2315         cfs_clear_bit(HRT_RUNNING, &t->hrt_flags);
2316         cfs_complete(&t->hrt_completion);
2317
2318         return 0;
2319 }
2320
2321 static int ptlrpc_start_hr_thread(struct ptlrpc_hr_service *hr, int n, int cpu)
2322 {
2323         struct ptlrpc_hr_thread *t = &hr->hr_threads[n];
2324         struct ptlrpc_hr_args args;
2325         int rc;
2326         ENTRY;
2327
2328         args.thread_index = n;
2329         args.cpu_index = cpu;
2330         args.hrs = hr;
2331
2332         rc = cfs_kernel_thread(ptlrpc_hr_main, (void*)&args,
2333                                CLONE_VM|CLONE_FILES);
2334         if (rc < 0) {
2335                 cfs_complete(&t->hrt_completion);
2336                 GOTO(out, rc);
2337         }
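        /* wait until the new thread reports HRT_RUNNING: \a args lives on
         * this stack frame, so we must not return before it is done with it */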
2338         l_wait_condition(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
2339         RETURN(0);
2340  out:
2341         return rc;
2342 }
2343
2344 static void ptlrpc_stop_hr_thread(struct ptlrpc_hr_thread *t)
2345 {
2346         ENTRY;
2347
2348         cfs_set_bit(HRT_STOPPING, &t->hrt_flags);
2349         cfs_waitq_signal(&t->hrt_wait);
2350         cfs_wait_for_completion(&t->hrt_completion);
2351
2352         EXIT;
2353 }
2354
2355 static void ptlrpc_stop_hr_threads(struct ptlrpc_hr_service *hrs)
2356 {
2357         int n;
2358         ENTRY;
2359
2360         for (n = 0; n < hrs->hr_n_threads; n++)
2361                 ptlrpc_stop_hr_thread(&hrs->hr_threads[n]);
2362
2363         EXIT;
2364 }
2365
2366 static int ptlrpc_start_hr_threads(struct ptlrpc_hr_service *hr)
2367 {
2368         int rc = -ENOMEM;
2369         int n, cpu, threads_started = 0;
2370         ENTRY;
2371
2372         LASSERT(hr != NULL);
2373         LASSERT(hr->hr_n_threads > 0);
2374
2375         for (n = 0, cpu = 0; n < hr->hr_n_threads; n++) {
2376 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
2377                 while (!cfs_cpu_online(cpu)) {
2378                         cpu++;
2379                         if (cpu >= cfs_num_possible_cpus())
2380                                 cpu = 0;
2381                 }
2382 #endif
2383                 rc = ptlrpc_start_hr_thread(hr, n, cpu);
2384                 if (rc != 0)
2385                         break;
2386                 threads_started++;
2387                 cpu++;
2388         }
2389         if (threads_started == 0) {
2390                 CERROR("No reply handling threads started\n");
2391                 RETURN(-ESRCH);
2392         }
2393         if (threads_started < hr->hr_n_threads) {
2394                 CWARN("Started only %d reply handling threads out of %d\n",
2395                       threads_started, hr->hr_n_threads);
2396                 hr->hr_n_threads = threads_started;
2397         }
2398         RETURN(0);
2399 }
2400
2401 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
2402                                struct ptlrpc_thread *thread)
2403 {
2404         struct l_wait_info lwi = { 0 };
2405         ENTRY;
2406
2407         CDEBUG(D_RPCTRACE, "Stopping thread [ %p : %u ]\n",
2408                thread, thread->t_pid);
2409
2410         cfs_spin_lock(&svc->srv_lock);
2411         /* let the thread know that we would like it to stop asap */
2412         thread->t_flags |= SVC_STOPPING;
2413         cfs_spin_unlock(&svc->srv_lock);
2414
2415         cfs_waitq_broadcast(&svc->srv_waitq);
2416         l_wait_event(thread->t_ctl_waitq,
2417                      (thread->t_flags & SVC_STOPPED), &lwi);
2418
2419         cfs_spin_lock(&svc->srv_lock);
2420         cfs_list_del(&thread->t_link);
2421         cfs_spin_unlock(&svc->srv_lock);
2422
2423         OBD_FREE_PTR(thread);
2424         EXIT;
2425 }
2426
2427 /**
2428  * Stops all threads of a particular service \a svc
2429  */
2430 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2431 {
2432         struct ptlrpc_thread *thread;
2433         ENTRY;
2434
2435         cfs_spin_lock(&svc->srv_lock);
2436         while (!cfs_list_empty(&svc->srv_threads)) {
2437                 thread = cfs_list_entry(svc->srv_threads.next,
2438                                         struct ptlrpc_thread, t_link);
2439
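                /* drop srv_lock across ptlrpc_stop_thread(), which blocks
                 * until the thread exits; restart from the list head on every
                 * iteration */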
2440                 cfs_spin_unlock(&svc->srv_lock);
2441                 ptlrpc_stop_thread(svc, thread);
2442                 cfs_spin_lock(&svc->srv_lock);
2443         }
2444
2445         cfs_spin_unlock(&svc->srv_lock);
2446         EXIT;
2447 }
2448
2449 int ptlrpc_start_threads(struct ptlrpc_service *svc)
2450 {
2451         int i, rc = 0;
2452         ENTRY;
2453
2454         /* We require 2 threads min - see note in
2455            ptlrpc_server_handle_request */
2456         LASSERT(svc->srv_threads_min >= 2);
2457         for (i = 0; i < svc->srv_threads_min; i++) {
2458                 rc = ptlrpc_start_thread(svc);
2459                 /* We have enough threads, don't start more.  b=15759 */
2460                 if (rc == -EMFILE) {
2461                         rc = 0;
2462                         break;
2463                 }
2464                 if (rc) {
2465                         CERROR("cannot start %s thread #%d: rc %d\n",
2466                                svc->srv_thread_name, i, rc);
2467                         ptlrpc_stop_all_threads(svc);
2468                         break;
2469                 }
2470         }
2471         RETURN(rc);
2472 }
2473
2474 int ptlrpc_start_thread(struct ptlrpc_service *svc)
2475 {
2476         struct l_wait_info lwi = { 0 };
2477         struct ptlrpc_svc_data d;
2478         struct ptlrpc_thread *thread;
2479         char name[32];
2480         int rc;
2481         ENTRY;
2482
2483         CDEBUG(D_RPCTRACE, "%s started %d min %d max %d running %d\n",
2484                svc->srv_name, svc->srv_threads_running, svc->srv_threads_min,
2485                svc->srv_threads_max, svc->srv_threads_running);
2486
2487         if (unlikely(svc->srv_is_stopping))
2488                 RETURN(-ESRCH);
2489
2490         if (!ptlrpc_threads_increasable(svc) ||
2491             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2492              svc->srv_threads_running == svc->srv_threads_min - 1))
2493                 RETURN(-EMFILE);
2494
2495         OBD_ALLOC_PTR(thread);
2496         if (thread == NULL)
2497                 RETURN(-ENOMEM);
2498         cfs_waitq_init(&thread->t_ctl_waitq);
2499
2500         cfs_spin_lock(&svc->srv_lock);
2501         if (!ptlrpc_threads_increasable(svc)) {
2502                 cfs_spin_unlock(&svc->srv_lock);
2503                 OBD_FREE_PTR(thread);
2504                 RETURN(-EMFILE);
2505         }
2506
2507         svc->srv_threads_starting++;
2508         thread->t_id    = svc->srv_threads_next_id++;
2509         thread->t_flags |= SVC_STARTING;
2510         thread->t_svc   = svc;
2511
2512         cfs_list_add(&thread->t_link, &svc->srv_threads);
2513         cfs_spin_unlock(&svc->srv_lock);
2514
2515         sprintf(name, "%s_%02d", svc->srv_thread_name, thread->t_id);
2516         d.svc = svc;
2517         d.name = name;
2518         d.thread = thread;
2519
2520         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", name);
2521
2522         /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
2523          * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
2524          */
2525         rc = cfs_kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
2526         if (rc < 0) {
2527                 CERROR("cannot start thread '%s': rc %d\n", name, rc);
2528
2529                 cfs_spin_lock(&svc->srv_lock);
2530                 cfs_list_del(&thread->t_link);
2531                 --svc->srv_threads_starting;
2532                 cfs_spin_unlock(&svc->srv_lock);
2533
2534                 OBD_FREE(thread, sizeof(*thread));
2535                 RETURN(rc);
2536         }
2537         l_wait_event(thread->t_ctl_waitq,
2538                      thread->t_flags & (SVC_RUNNING | SVC_STOPPED), &lwi);
2539
2540         rc = (thread->t_flags & SVC_STOPPED) ? thread->t_id : 0;
2541         RETURN(rc);
2542 }
2543
2544
2545 int ptlrpc_hr_init(void)
2546 {
2547         int i;
2548         int n_cpus = cfs_num_online_cpus();
2549         struct ptlrpc_hr_service *hr;
2550         int size;
2551         int rc;
2552         ENTRY;
2553
2554         LASSERT(ptlrpc_hr == NULL);
2555
2556         size = offsetof(struct ptlrpc_hr_service, hr_threads[n_cpus]);
2557         OBD_ALLOC(hr, size);
2558         if (hr == NULL)
2559                 RETURN(-ENOMEM);
2560         for (i = 0; i < n_cpus; i++) {
2561                 struct ptlrpc_hr_thread *t = &hr->hr_threads[i];
2562
2563                 cfs_spin_lock_init(&t->hrt_lock);
2564                 cfs_waitq_init(&t->hrt_wait);
2565                 CFS_INIT_LIST_HEAD(&t->hrt_queue);
2566                 cfs_init_completion(&t->hrt_completion);
2567         }
2568         hr->hr_n_threads = n_cpus;
2569         hr->hr_size = size;
2570         ptlrpc_hr = hr;
2571
2572         rc = ptlrpc_start_hr_threads(hr);
2573         if (rc) {
2574                 OBD_FREE(hr, hr->hr_size);
2575                 ptlrpc_hr = NULL;
2576         }
2577         RETURN(rc);
2578 }
2579
2580 void ptlrpc_hr_fini(void)
2581 {
2582         if (ptlrpc_hr != NULL) {
2583                 ptlrpc_stop_hr_threads(ptlrpc_hr);
2584                 OBD_FREE(ptlrpc_hr, ptlrpc_hr->hr_size);
2585                 ptlrpc_hr = NULL;
2586         }
2587 }
2588
2589 #endif /* __KERNEL__ */
2590
2591 /**
2592  * Wait until all already scheduled replies are processed.
2593  */
2594 static void ptlrpc_wait_replies(struct ptlrpc_service *svc)
2595 {
2596         while (1) {
2597                 int rc;
2598                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
2599                                                      NULL, NULL);
2600                 rc = l_wait_event(svc->srv_waitq,
2601                                   cfs_atomic_read(&svc->srv_n_difficult_replies) == 0,
2602                                   &lwi);
2603                 if (rc == 0)
2604                         break;
2605                 CWARN("Unexpectedly long timeout %p\n", svc);
2606         }
2607 }
2608
2609 int ptlrpc_unregister_service(struct ptlrpc_service *service)
2610 {
2611         int                   rc;
2612         struct l_wait_info    lwi;
2613         cfs_list_t           *tmp;
2614         struct ptlrpc_reply_state *rs, *t;
2615         struct ptlrpc_at_array *array = &service->srv_at_array;
2616         ENTRY;
2617
2618         service->srv_is_stopping = 1;
2619         cfs_timer_disarm(&service->srv_at_timer);
2620
2621         ptlrpc_stop_all_threads(service);
2622         LASSERT(cfs_list_empty(&service->srv_threads));
2623
2624         cfs_spin_lock (&ptlrpc_all_services_lock);
2625         cfs_list_del_init (&service->srv_list);
2626         cfs_spin_unlock (&ptlrpc_all_services_lock);
2627
2628         ptlrpc_lprocfs_unregister_service(service);
2629
2630         /* All history will be culled when the next request buffer is
2631          * freed */
2632         service->srv_max_history_rqbds = 0;
2633
2634         CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
2635
2636         rc = LNetClearLazyPortal(service->srv_req_portal);
2637         LASSERT (rc == 0);
2638
2639         /* Unlink all the request buffers.  This forces a 'final' event with
2640          * its 'unlink' flag set for each posted rqbd */
2641         cfs_list_for_each(tmp, &service->srv_active_rqbds) {
2642                 struct ptlrpc_request_buffer_desc *rqbd =
2643                         cfs_list_entry(tmp, struct ptlrpc_request_buffer_desc,
2644                                        rqbd_list);
2645
2646                 rc = LNetMDUnlink(rqbd->rqbd_md_h);
2647                 LASSERT (rc == 0 || rc == -ENOENT);
2648         }
2649
2650         /* Wait for the network to release any buffers it's currently
2651          * filling */
2652         for (;;) {
2653                 cfs_spin_lock(&service->srv_lock);
2654                 rc = service->srv_nrqbd_receiving;
2655                 cfs_spin_unlock(&service->srv_lock);
2656
2657                 if (rc == 0)
2658                         break;
2659
2660                 /* Network access will complete in finite time but the HUGE
2661                  * timeout lets us CWARN for visibility of sluggish NALs */
2662                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2663                                            cfs_time_seconds(1), NULL, NULL);
2664                 rc = l_wait_event(service->srv_waitq,
2665                                   service->srv_nrqbd_receiving == 0,
2666                                   &lwi);
2667                 if (rc == -ETIMEDOUT)
2668                         CWARN("Service %s waiting for request buffers\n",
2669                               service->srv_name);
2670         }
2671
2672         /* schedule all outstanding replies to terminate them */
2673         cfs_spin_lock(&service->srv_rs_lock);
2674         while (!cfs_list_empty(&service->srv_active_replies)) {
2675                 struct ptlrpc_reply_state *rs =
2676                         cfs_list_entry(service->srv_active_replies.next,
2677                                        struct ptlrpc_reply_state, rs_list);
2678                 cfs_spin_lock(&rs->rs_lock);
2679                 ptlrpc_schedule_difficult_reply(rs);
2680                 cfs_spin_unlock(&rs->rs_lock);
2681         }
2682         cfs_spin_unlock(&service->srv_rs_lock);
2683
2684         /* purge the request queue.  NB No new replies (rqbds all unlinked)
2685          * and no service threads, so I'm the only thread noodling the
2686          * request queue now */
2687         while (!cfs_list_empty(&service->srv_req_in_queue)) {
2688                 struct ptlrpc_request *req =
2689                         cfs_list_entry(service->srv_req_in_queue.next,
2690                                        struct ptlrpc_request,
2691                                        rq_list);
2692
2693                 cfs_list_del(&req->rq_list);
2694                 service->srv_n_queued_reqs--;
2695                 service->srv_n_active_reqs++;
2696                 ptlrpc_server_finish_request(service, req);
2697         }
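        /* drain whatever remains on both the high priority and normal queues;
         * force=1 makes ptlrpc_server_request_get() ignore thread accounting */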
2698         while (ptlrpc_server_request_pending(service, 1)) {
2699                 struct ptlrpc_request *req;
2700
2701                 req = ptlrpc_server_request_get(service, 1);
2702                 cfs_list_del(&req->rq_list);
2703                 service->srv_n_queued_reqs--;
2704                 service->srv_n_active_reqs++;
2705                 ptlrpc_hpreq_fini(req);
2706                 ptlrpc_server_finish_request(service, req);
2707         }
2708         LASSERT(service->srv_n_queued_reqs == 0);
2709         LASSERT(service->srv_n_active_reqs == 0);
2710         LASSERT(service->srv_n_history_rqbds == 0);
2711         LASSERT(cfs_list_empty(&service->srv_active_rqbds));
2712
2713         /* Now free all the request buffers since nothing references them
2714          * any more... */
2715         while (!cfs_list_empty(&service->srv_idle_rqbds)) {
2716                 struct ptlrpc_request_buffer_desc *rqbd =
2717                         cfs_list_entry(service->srv_idle_rqbds.next,
2718                                        struct ptlrpc_request_buffer_desc,
2719                                        rqbd_list);
2720
2721                 ptlrpc_free_rqbd(rqbd);
2722         }
2723
2724         ptlrpc_wait_replies(service);
2725
2726         cfs_list_for_each_entry_safe(rs, t, &service->srv_free_rs_list,
2727                                      rs_list) {
2728                 cfs_list_del(&rs->rs_list);
2729                 OBD_FREE(rs, service->srv_max_reply_size);
2730         }
2731
2732         /* In case somebody rearmed this in the meantime */
2733         cfs_timer_disarm(&service->srv_at_timer);
2734
2735         if (array->paa_reqs_array != NULL) {
2736                 OBD_FREE(array->paa_reqs_array,
2737                          sizeof(cfs_list_t) * array->paa_size);
2738                 array->paa_reqs_array = NULL;
2739         }
2740
2741         if (array->paa_reqs_count != NULL) {
2742                 OBD_FREE(array->paa_reqs_count,
2743                          sizeof(__u32) * array->paa_size);
2744                 array->paa_reqs_count = NULL;
2745         }
2746
2747         OBD_FREE_PTR(service);
2748         RETURN(0);
2749 }
2750
2751 /**
2752  * Returns 0 if the service is healthy.
2753  *
2754  * Right now, it just checks to make sure that requests aren't languishing
2755  * in the queue.  We'll use this health check to govern whether a node needs
2756  * to be shot, so it's intentionally non-aggressive. */
2757 int ptlrpc_service_health_check(struct ptlrpc_service *svc)
2758 {
2759         struct ptlrpc_request *request;
2760         struct timeval         right_now;
2761         long                   timediff;
2762
2763         if (svc == NULL)
2764                 return 0;
2765
2766         cfs_gettimeofday(&right_now);
2767
2768         cfs_spin_lock(&svc->srv_rq_lock);
2769         if (!ptlrpc_server_request_pending(svc, 1)) {
2770                 cfs_spin_unlock(&svc->srv_rq_lock);
2771                 return 0;
2772         }
2773
2774         /* How long has the next entry been waiting? */
2775         if (cfs_list_empty(&svc->srv_request_queue))
2776                 request = cfs_list_entry(svc->srv_request_hpq.next,
2777                                          struct ptlrpc_request, rq_list);
2778         else
2779                 request = cfs_list_entry(svc->srv_request_queue.next,
2780                                          struct ptlrpc_request, rq_list);
2781         timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
2782         cfs_spin_unlock(&svc->srv_rq_lock);
2783
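        /* timediff is in microseconds; report the service unhealthy if the
         * oldest queued request has waited longer than 1.5 * obd_timeout
         * (adaptive timeouts disabled) or at_max seconds */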
2784         if ((timediff / ONE_MILLION) > (AT_OFF ? obd_timeout * 3/2 :
2785                                         at_max)) {
2786                 CERROR("%s: unhealthy - request has been waiting %lds\n",
2787                        svc->srv_name, timediff / ONE_MILLION);
2788                 return (-1);
2789         }
2790
2791         return 0;
2792 }