lustre/ptlrpc/service.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#include <linux/kp30.h>
#endif
#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include <linux/lustre_log.h>
#include <portals/types.h>
#include "ptlrpc_internal.h"

static LIST_HEAD (ptlrpc_all_services);
static spinlock_t ptlrpc_all_services_lock = SPIN_LOCK_UNLOCKED;

static void
ptlrpc_free_server_req (struct ptlrpc_request *req)
{
        /* The last request to be received into a request buffer uses space
         * in the request buffer descriptor, otherwise requests are
         * allocated dynamically in the incoming request event handler */
        if (req == &req->rq_rqbd->rqbd_req)
                return;

        OBD_FREE(req, sizeof(*req));
}

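/* Buffers above SVC_BUF_VMALLOC_THRESHOLD are vmalloc'd rather than
 * kmalloc'd: large physically-contiguous allocations become unreliable
 * once memory fragments, while vmalloc only needs virtually contiguous
 * pages. */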
static char *
ptlrpc_alloc_request_buffer (int size)
{
        char *ptr;

        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VMALLOC(ptr, size);
        else
                OBD_ALLOC(ptr, size);

        return (ptr);
}

static void
ptlrpc_free_request_buffer (char *ptr, int size)
{
        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VFREE(ptr, size);
        else
                OBD_FREE(ptr, size);
}

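/* Allocate a request buffer descriptor and its network buffer, and queue
 * it on the service's idle list; it is not posted to the network until
 * ptlrpc_server_post_idle_rqbds() registers it. */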
struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd (struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        unsigned long                      flags;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_ALLOC(rqbd, sizeof (*rqbd));
        if (rqbd == NULL)
                return (NULL);

        rqbd->rqbd_srv_ni = srv_ni;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);

        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE(rqbd, sizeof (*rqbd));
                return (NULL);
        }

        spin_lock_irqsave (&svc->srv_lock, flags);
        list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
        svc->srv_nbufs++;
        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (rqbd);
}

void
ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_srv_ni  *sni = rqbd->rqbd_srv_ni;
        struct ptlrpc_service *svc = sni->sni_service;
        unsigned long          flags;

        LASSERT (rqbd->rqbd_refcount == 0);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&rqbd->rqbd_list);
        svc->srv_nbufs--;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
        OBD_FREE (rqbd, sizeof (*rqbd));
}

void
ptlrpc_save_llog_lock (struct ptlrpc_request *req,
                       struct llog_create_locks *lcl)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        LASSERT (rs != NULL);
        LASSERT (rs->rs_llog_locks == NULL);

        rs->rs_llog_locks = lcl;
}

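/* Record a lock on the reply state so it is only released once the client
 * has seen the reply (or the transaction has committed); saving a lock
 * makes the reply "difficult". */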
void
ptlrpc_save_lock (struct ptlrpc_request *req,
                  struct lustre_handle *lock, int mode)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        if (!lock->cookie)
                return;

        LASSERT (rs != NULL);
        LASSERT (rs->rs_nlocks < RS_MAX_LOCKS);

        idx = rs->rs_nlocks++;
        rs->rs_locks[idx] = *lock;
        rs->rs_modes[idx] = mode;
        rs->rs_difficult = 1;
}

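/* Called with svc->srv_lock held.  Queue a difficult reply for a service
 * thread to attend to; a no-op if it is already scheduled. */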
void
ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

#ifdef CONFIG_SMP
        LASSERT (spin_is_locked (&svc->srv_lock));
#endif
        LASSERT (rs->rs_difficult);
        rs->rs_scheduled_ever = 1;              /* flag any notification attempt */

        if (rs->rs_scheduled)                   /* being set up or already notified */
                return;

        rs->rs_scheduled = 1;
        list_del (&rs->rs_list);
        list_add (&rs->rs_list, &svc->srv_reply_queue);
        wake_up (&svc->srv_waitq);
}

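/* Called when a transaction commits: wake the owning service for every
 * uncommitted reply whose transno is now committed, so a service thread
 * can finish it off. */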
void
ptlrpc_commit_replies (struct obd_device *obd)
{
        struct list_head   *tmp;
        struct list_head   *nxt;
        unsigned long       flags;

        /* Find any replies that have been committed and get their service
         * to attend to complete them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);

        list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) {
                struct ptlrpc_reply_state *rs =
                        list_entry (tmp, struct ptlrpc_reply_state, rs_obd_list);
                struct llog_create_locks *lcl = rs->rs_llog_locks;

                rs->rs_llog_locks = NULL;
                LASSERT (rs->rs_difficult);

                if (rs->rs_transno <= obd->obd_last_committed) {
                        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

                        spin_lock (&svc->srv_lock);
                        list_del_init (&rs->rs_obd_list);
                        ptlrpc_schedule_difficult_reply (rs);
                        spin_unlock (&svc->srv_lock);

                        if (lcl != NULL)
                                llog_create_lock_free(lcl);
                }
        }

        spin_unlock_irqrestore (&obd->obd_uncommitted_replies_lock, flags);
}

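/* (*large - *small) in microseconds */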
static long
timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

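/* Post one idle request buffer to the network.  Returns 1 if a buffer was
 * posted, 0 if none was idle, and -1 if registration failed (the buffer
 * goes back on the idle list to be retried later). */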
static int
ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        unsigned long                      flags;
        int                                rc;

        spin_lock_irqsave(&svc->srv_lock, flags);
        if (list_empty (&svc->srv_idle_rqbds)) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);
                return (0);
        }

        rqbd = list_entry(svc->srv_idle_rqbds.next,
                          struct ptlrpc_request_buffer_desc,
                          rqbd_list);
        list_del (&rqbd->rqbd_list);

        /* assume we will post successfully */
        srv_ni = rqbd->rqbd_srv_ni;
        srv_ni->sni_nrqbd_receiving++;
        list_add (&rqbd->rqbd_list, &srv_ni->sni_active_rqbds);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        rc = ptlrpc_register_rqbd(rqbd);
        if (rc == 0)
                return (1);

        spin_lock_irqsave(&svc->srv_lock, flags);

        srv_ni->sni_nrqbd_receiving--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);

        if (srv_ni->sni_nrqbd_receiving == 0) {
                /* This service is off-air on this interface because all
                 * its request buffers are busy.  Portals will have started
                 * dropping incoming requests until more buffers get
                 * posted */
                CERROR("All %s %s request buffers busy\n",
                       svc->srv_name, srv_ni->sni_ni->pni_name);
        }

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (-1);
}

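/* Create a service: allocate per-interface state, then allocate and post
 * nbufs request buffers of bufsize bytes on every network interface,
 * failing startup if any buffer cannot be allocated or posted. */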
struct ptlrpc_service *
ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                int req_portal, int rep_portal,
                svc_handler_t handler, char *name,
                struct proc_dir_entry *proc_entry)
{
        int                                i;
        int                                j;
        int                                ssize;
        struct ptlrpc_service             *service;
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);
        LASSERT (nbufs > 0);
        LASSERT (bufsize >= max_req_size);

        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;

        INIT_LIST_HEAD(&service->srv_request_queue);
        INIT_LIST_HEAD(&service->srv_idle_rqbds);
        INIT_LIST_HEAD(&service->srv_reply_queue);

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                INIT_LIST_HEAD(&srv_ni->sni_active_rqbds);
                INIT_LIST_HEAD(&srv_ni->sni_active_replies);
        }

        spin_lock (&ptlrpc_all_services_lock);
        list_add (&service->srv_list, &ptlrpc_all_services);
        spin_unlock (&ptlrpc_all_services_lock);

        /* Now allocate the request buffers, assuming all interfaces require
         * the same number. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                for (j = 0; j < nbufs; j++) {
                        rqbd = ptlrpc_alloc_rqbd (srv_ni);

                        if (rqbd == NULL) {
                                CERROR ("%s.%d: Can't allocate request %d "
                                        "on %s\n", name, i, j,
                                        srv_ni->sni_ni->pni_name);
                                GOTO(failed, NULL);
                        }

                        /* We shouldn't be under memory pressure at
                         * startup, so fail if we can't post all our
                         * buffers at this time. */
                        if (ptlrpc_server_post_idle_rqbds(service) <= 0)
                                GOTO(failed, NULL);
                }
        }

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        return NULL;
}

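/* Drop a request's reference on its request buffer; when the last request
 * using a buffer is freed, the buffer is recycled onto the idle list for
 * reposting. */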
static void
ptlrpc_server_free_request(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        unsigned long  flags;
        int            refcount;

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_n_active_reqs--;
        refcount = --(req->rq_rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle */
                list_del(&req->rq_rqbd->rqbd_list);
                list_add_tail(&req->rq_rqbd->rqbd_list,
                              &svc->srv_idle_rqbds);
        }
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_server_req(req);
}

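/* Take one request off the service's queue, sanity-check it and pass it
 * to the service handler.  Returns 1 if a request was handled, 0 if the
 * queue was empty or this thread must stay free for difficult replies. */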
static int
ptlrpc_server_handle_request (struct ptlrpc_service *svc)
{
        struct obd_export     *export = NULL;
        struct ptlrpc_request *request;
        unsigned long          flags;
        struct timeval         work_start;
        struct timeval         work_end;
        long                   timediff;
        int                    rc;
        char                   str[PTL_NALFMT_SIZE];
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_request_queue) ||
            (svc->srv_n_difficult_replies != 0 &&
             svc->srv_n_active_reqs >= (svc->srv_nthreads - 1))) {
                /* If all the other threads are handling requests, I must
                 * remain free to handle any 'difficult' reply that might
                 * block them */
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        request = list_entry (svc->srv_request_queue.next,
                              struct ptlrpc_request, rq_list);
        list_del_init (&request->rq_list);
        svc->srv_n_queued_reqs--;
        svc->srv_n_active_reqs++;

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        do_gettimeofday(&work_start);
        timediff = timeval_sub(&work_start, &request->rq_arrival_time);
        if (svc->srv_stats != NULL) {
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
                                    svc->srv_n_queued_reqs);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
                                    svc->srv_n_active_reqs);
        }

#if SWAB_PARANOIA
        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;
#endif
        rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
        if (rc != 0) {
                CERROR ("error unpacking request: ptl %d from %s"
                        " xid "LPU64"\n", svc->srv_req_portal,
                        ptlrpc_peernid2str(&request->rq_peer, str),
                        request->rq_xid);
                goto out;
        }

        rc = -EINVAL;
        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u) from %s\n",
                       request->rq_reqmsg->type,
                       ptlrpc_peernid2str(&request->rq_peer, str));
                goto out;
        }

        CDEBUG(D_NET, "got req "LPD64"\n", request->rq_xid);

        /* Discard requests queued for longer than my timeout.  If the
         * client's timeout is similar to mine, she'll be timing out this
         * REQ anyway (bug 1502) */
        if (timediff / 1000000 > (long)obd_timeout) {
                CERROR("Dropping timed-out request from %s: %ld seconds old\n",
                       ptlrpc_peernid2str(&request->rq_peer, str),
                       timediff / 1000000);
                goto out;
        }

        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                if (request->rq_reqmsg->conn_cnt <
                    request->rq_export->exp_conn_cnt) {
                        DEBUG_REQ(D_ERROR, request,
                                  "DROPPING req from old connection %d < %d",
                                  request->rq_reqmsg->conn_cnt,
                                  request->rq_export->exp_conn_cnt);
                        goto put_conn;
                }

                export = class_export_rpc_get(request->rq_export);
                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);
        }

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               ptlrpc_peernid2str(&request->rq_peer, str),
               request->rq_reqmsg->opc);

        rc = svc->srv_handler(request);
        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               ptlrpc_peernid2str(&request->rq_peer, str),
               request->rq_reqmsg->opc);

        if (export != NULL)
                class_export_rpc_put(export);

put_conn:
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

 out:
        do_gettimeofday(&work_end);

        timediff = timeval_sub(&work_end, &work_start);

        CDEBUG((timediff / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
               "request "LPU64" opc %u from NID %s processed in %ldus "
               "(%ldus total)\n", request->rq_xid, request->rq_reqmsg->opc,
               ptlrpc_peernid2str(&request->rq_peer, str),
               timediff, timeval_sub(&work_end, &request->rq_arrival_time));

        if (svc->srv_stats != NULL) {
                int opc = opcode_offset(request->rq_reqmsg->opc);
                if (opc > 0) {
                        LASSERT(opc < LUSTRE_MAX_OPCODES);
                        lprocfs_counter_add(svc->srv_stats,
                                            opc + PTLRPC_LAST_CNTR,
                                            timediff);
                }
        }

        ptlrpc_server_free_request(svc, request);

        RETURN(1);
}

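/* Take one "difficult" reply off the service's reply queue: unlink it
 * from the commit/export notification lists, release the locks it was
 * holding, and free the reply state once the network has finished with
 * it.  Returns 1 if a reply was attended to, 0 if the queue was empty. */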
static int
ptlrpc_server_handle_reply (struct ptlrpc_service *svc)
{
        struct ptlrpc_reply_state *rs;
        unsigned long              flags;
        struct obd_export         *exp;
        struct obd_device         *obd;
        struct llog_create_locks  *lcl;
        int                        nlocks;
        int                        been_handled;
        char                       str[PTL_NALFMT_SIZE];
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_reply_queue)) {
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        rs = list_entry (svc->srv_reply_queue.next,
                         struct ptlrpc_reply_state, rs_list);

        exp = rs->rs_export;
        obd = exp->exp_obd;

        LASSERT (rs->rs_difficult);
        LASSERT (rs->rs_scheduled);

        list_del_init (&rs->rs_list);

        /* Disengage from notifiers carefully (lock ordering!) */
        spin_unlock(&svc->srv_lock);

        spin_lock (&obd->obd_uncommitted_replies_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_obd_list);
        spin_unlock (&obd->obd_uncommitted_replies_lock);

        spin_lock (&exp->exp_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_exp_list);
        spin_unlock (&exp->exp_lock);

        spin_lock(&svc->srv_lock);

        been_handled = rs->rs_handled;
        rs->rs_handled = 1;

        nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
        rs->rs_nlocks = 0;                      /* locks still on rs_locks! */

        lcl = rs->rs_llog_locks;
        rs->rs_llog_locks = NULL;

        if (nlocks == 0 && !been_handled) {
                /* If we see this, we should already have seen the warning
                 * in mds_steal_ack_locks()  */
                CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
                      " o%d NID %s\n",
                      rs,
                      rs->rs_xid, rs->rs_transno,
                      rs->rs_msg.opc,
                      ptlrpc_peernid2str(&exp->exp_connection->c_peer, str));
        }

        if ((!been_handled && rs->rs_on_net) ||
            nlocks > 0 || lcl != NULL) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                if (!been_handled && rs->rs_on_net) {
                        PtlMDUnlink(rs->rs_md_h);
                        /* Ignore return code; we're racing with
                         * completion... */
                }

                while (nlocks-- > 0)
                        ldlm_lock_decref(&rs->rs_locks[nlocks],
                                         rs->rs_modes[nlocks]);

                if (lcl != NULL)
                        llog_create_lock_free(lcl);

                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        rs->rs_scheduled = 0;

        if (!rs->rs_on_net) {
                /* Off the net */
                svc->srv_n_difficult_replies--;
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                class_export_put (exp);
                rs->rs_export = NULL;
                lustre_free_reply_state (rs);
                atomic_dec (&svc->srv_outstanding_replies);
                RETURN(1);
        }

        /* still on the net; callback will schedule */
        spin_unlock_irqrestore (&svc->srv_lock, flags);
        RETURN(1);
}

#ifndef __KERNEL__
/* FIXME make use of timeout later */
int
liblustre_check_services (void *arg)
{
        int  did_something = 0;
        int  rc;
        struct list_head *tmp, *nxt;
        ENTRY;

        /* I'm relying on being single threaded, not to have to lock
         * ptlrpc_all_services etc */
        list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
                struct ptlrpc_service *svc =
                        list_entry (tmp, struct ptlrpc_service, srv_list);

                if (svc->srv_nthreads != 0)     /* I've recursed */
                        continue;

                /* service threads can block for bulk, so this limits us
                 * (arbitrarily) to recursing 1 stack frame per service.
                 * Note that the problem with recursion is that we have to
                 * unwind completely before our caller can resume. */

                svc->srv_nthreads++;

                do {
                        rc = ptlrpc_server_handle_reply(svc);
                        rc |= ptlrpc_server_handle_request(svc);
                        rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
                        did_something |= rc;
                } while (rc);

                svc->srv_nthreads--;
        }

        RETURN(did_something);
}

#else /* __KERNEL__ */

/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
void ptlrpc_daemonize(void)
{
        exit_mm(current);
        lustre_daemonize_helper();
        exit_files(current);
        reparent_to_init();
}

static int
ptlrpc_retry_rqbds(void *arg)
{
        struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;

        svc->srv_rqbd_timeout = 0;
        return (-ETIMEDOUT);
}

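/* Main loop of a service thread: sleep until there is work, then handle
 * replies first, handle a request unless this is the last free thread and
 * difficult replies are outstanding, and finally repost idle request
 * buffers.  Exits only when stopped and all difficult replies are done. */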
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct ptlrpc_service  *svc = data->svc;
        struct ptlrpc_thread   *thread = data->thread;
        unsigned long           flags;
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        LASSERTF(strlen(data->name) < sizeof(current->comm),
                 "name %d >= len %d\n",
                 (int)strlen(data->name), (int)sizeof(current->comm));
        THREAD_NAME(current->comm, sizeof(current->comm) - 1, "%s", data->name);

        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_nthreads++;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* XXX maintain a list of all managed devices: insert here */

        while ((thread->t_flags & SVC_STOPPING) == 0 ||
               svc->srv_n_difficult_replies != 0) {
                /* Don't exit while there are replies to be handled */
                struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
                                                     ptlrpc_retry_rqbds, svc);

                l_wait_event_exclusive (svc->srv_waitq,
                              ((thread->t_flags & SVC_STOPPING) != 0 &&
                               svc->srv_n_difficult_replies == 0) ||
                              (!list_empty(&svc->srv_idle_rqbds) &&
                               svc->srv_rqbd_timeout == 0) ||
                              !list_empty (&svc->srv_reply_queue) ||
                              (!list_empty (&svc->srv_request_queue) &&
                               (svc->srv_n_difficult_replies == 0 ||
                                svc->srv_n_active_reqs <
                                (svc->srv_nthreads - 1))),
                              &lwi);

                if (!list_empty (&svc->srv_reply_queue))
                        ptlrpc_server_handle_reply (svc);

                /* only handle requests if there are no difficult replies
                 * outstanding, or I'm not the last thread handling
                 * requests */
                if (!list_empty (&svc->srv_request_queue) &&
                    (svc->srv_n_difficult_replies == 0 ||
                     svc->srv_n_active_reqs < (svc->srv_nthreads - 1)))
                        ptlrpc_server_handle_request (svc);

                if (!list_empty(&svc->srv_idle_rqbds) &&
                    ptlrpc_server_post_idle_rqbds(svc) < 0) {
                        /* I just failed to repost request buffers.  Wait
                         * for a timeout (unless something else happens)
                         * before I try again */
                        svc->srv_rqbd_timeout = HZ/10;
                }
        }

        spin_lock_irqsave(&svc->srv_lock, flags);

        svc->srv_nthreads--;                    /* must know immediately */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        CDEBUG(D_NET, "service thread exiting, process %d\n", current->pid);
        return 0;
}

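/* Ask one service thread to stop, wait for it to exit, then unlink and
 * free it. */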
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        struct l_wait_info lwi = { 0 };
        unsigned long      flags;

        spin_lock_irqsave(&svc->srv_lock, flags);
        thread->t_flags = SVC_STOPPING;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        wake_up_all(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
                     &lwi);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&thread->t_link);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        OBD_FREE(thread, sizeof(*thread));
}

void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        unsigned long flags;
        struct ptlrpc_thread *thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        while (!list_empty(&svc->srv_threads)) {
                thread = list_entry(svc->srv_threads.next,
                                    struct ptlrpc_thread, t_link);

                spin_unlock_irqrestore(&svc->srv_lock, flags);
                ptlrpc_stop_thread(svc, thread);
                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        spin_unlock_irqrestore(&svc->srv_lock, flags);
}

/* @base_name should be 12 characters or less - 3 will be added on */
int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
                           int num_threads, char *base_name)
{
        int i, rc = 0;
        ENTRY;

        for (i = 0; i < num_threads; i++) {
                char name[32];
                snprintf(name, sizeof(name), "%s_%02d", base_name, i);
                rc = ptlrpc_start_thread(dev, svc, name);
                if (rc) {
                        CERROR("cannot start %s thread #%d: rc %d\n", base_name,
                               i, rc);
                        ptlrpc_stop_all_threads(svc);
                        break;
                }
        }
        RETURN(rc);
}

int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name)
{
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread *thread;
        unsigned long flags;
        int rc;
        ENTRY;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        d.dev = dev;
        d.svc = svc;
        d.name = name;
        d.thread = thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);

                /* take the thread back off the service's list before
                 * freeing it */
                spin_lock_irqsave(&svc->srv_lock, flags);
                list_del(&thread->t_link);
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                OBD_FREE(thread, sizeof(*thread));
                RETURN(rc);
        }
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);

        RETURN(0);
}
#endif

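/* Tear down a service: unlink all request buffers from the network and
 * wait for it to release them, drain the request queue, schedule and wait
 * out every outstanding difficult reply, then free all buffers and the
 * service itself. */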
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int                   i;
        int                   rc;
        unsigned long         flags;
        struct ptlrpc_srv_ni *srv_ni;
        struct l_wait_info    lwi;
        struct list_head     *tmp;

        LASSERT(list_empty(&service->srv_threads));

        spin_lock (&ptlrpc_all_services_lock);
        list_del_init (&service->srv_list);
        spin_unlock (&ptlrpc_all_services_lock);

        ptlrpc_lprocfs_unregister_service(service);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG(D_NET, "%s: tearing down interface %s\n",
                       service->srv_name, srv_ni->sni_ni->pni_name);

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                list_for_each(tmp, &srv_ni->sni_active_rqbds) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry(tmp, struct ptlrpc_request_buffer_desc,
                                           rqbd_list);

                        rc = PtlMDUnlink(rqbd->rqbd_md_h);
                        LASSERT (rc == PTL_OK || rc == PTL_MD_INVALID);
                }

                /* Wait for the network to release any buffers it's
                 * currently filling */
                for (;;) {
                        spin_lock_irqsave(&service->srv_lock, flags);
                        rc = srv_ni->sni_nrqbd_receiving;
                        spin_unlock_irqrestore(&service->srv_lock, flags);

                        if (rc == 0)
                                break;

                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility of
                         * sluggish NALs */
                        lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
                        rc = l_wait_event(service->srv_waitq,
                                          srv_ni->sni_nrqbd_receiving == 0,
                                          &lwi);
                        if (rc == -ETIMEDOUT)
                                CWARN("Waiting for request buffers on "
                                      "service %s on interface %s\n",
                                      service->srv_name, srv_ni->sni_ni->pni_name);
                }

                /* schedule all outstanding replies to terminate them */
                spin_lock_irqsave(&service->srv_lock, flags);
                while (!list_empty(&srv_ni->sni_active_replies)) {
                        struct ptlrpc_reply_state *rs =
                                list_entry(srv_ni->sni_active_replies.next,
                                           struct ptlrpc_reply_state,
                                           rs_list);
                        ptlrpc_schedule_difficult_reply(rs);
                }
                spin_unlock_irqrestore(&service->srv_lock, flags);
        }

        /* purge the request queue.  NB No new replies (rqbds all unlinked)
         * and no service threads, so I'm the only thread noodling the
         * request queue now */
        while (!list_empty(&service->srv_request_queue)) {
                struct ptlrpc_request *req =
                        list_entry(service->srv_request_queue.next,
                                   struct ptlrpc_request,
                                   rq_list);

                list_del(&req->rq_list);
                service->srv_n_queued_reqs--;
                service->srv_n_active_reqs++;

                ptlrpc_server_free_request(service, req);
        }
        LASSERT(service->srv_n_queued_reqs == 0);
        LASSERT(service->srv_n_active_reqs == 0);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                LASSERT(list_empty(&srv_ni->sni_active_rqbds));
        }

        /* Now free all the request buffers since nothing references them
         * any more... */
        while (!list_empty(&service->srv_idle_rqbds)) {
                struct ptlrpc_request_buffer_desc *rqbd =
                        list_entry(service->srv_idle_rqbds.next,
                                   struct ptlrpc_request_buffer_desc,
                                   rqbd_list);

                ptlrpc_free_rqbd(rqbd);
        }

        /* wait for all outstanding replies to complete (they were
         * scheduled having been flagged to abort above) */
        while (atomic_read(&service->srv_outstanding_replies) != 0) {
                struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);

                rc = l_wait_event(service->srv_waitq,
                                  !list_empty(&service->srv_reply_queue), &lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);

                if (rc == 0) {
                        ptlrpc_server_handle_reply(service);
                        continue;
                }
                CWARN("Unexpectedly long timeout waiting for replies from "
                      "service %p\n", service);
        }

        OBD_FREE(service,
                 offsetof(struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}