/* lustre/ptlrpc/client.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/lustre_ha.h>

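/* Initialize a client handle: record the request/reply portals, register the
 * client with recovd (if one is given) for recovery handling, and set up the
 * delayed/sending/dying request lists.  cli_rpc_sem limits the client to 32
 * RPCs in flight at once. */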
void ptlrpc_init_client(struct recovd_obd *recovd,
                        int (*recover)(struct ptlrpc_client *recover),
                        int req_portal,
                        int rep_portal, struct ptlrpc_client *cl)
{
        memset(cl, 0, sizeof(*cl));
        cl->cli_recovd = recovd;
        cl->cli_recover = recover;
        if (recovd)
                recovd_cli_manage(recovd, cl);
        cl->cli_obd = NULL;
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal = rep_portal;
        INIT_LIST_HEAD(&cl->cli_delayed_head);
        INIT_LIST_HEAD(&cl->cli_sending_head);
        INIT_LIST_HEAD(&cl->cli_dying_head);
        spin_lock_init(&cl->cli_lock);
        sema_init(&cl->cli_rpc_sem, 32);
}

__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}

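/* Look up the portals peer for the given uuid and return a referenced
 * connection to it, or NULL if the peer is unknown.  The connection's remote
 * uuid is refreshed and its epoch bumped on every successful lookup. */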
struct ptlrpc_connection *ptlrpc_uuid_to_connection(char *uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer);
        if (c) {
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));
                c->c_epoch++;
        }

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn, char *uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
        return;
}

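/* Bulk descriptor helpers: a ptlrpc_bulk_desc holds a reference on its
 * connection and a list of ptlrpc_bulk_page entries; b_page_count and
 * b_pages_remaining track how many pages are attached and how many are still
 * outstanding on the wire. */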
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->b_connection = ptlrpc_connection_addref(conn);
                atomic_set(&bulk->b_pages_remaining, 0);
                init_waitqueue_head(&bulk->b_waitq);
                INIT_LIST_HEAD(&bulk->b_page_list);
        }

        return bulk;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->b_desc = desc;
                ptl_set_inv_handle(&bulk->b_md_h);
                ptl_set_inv_handle(&bulk->b_me_h);
                list_add_tail(&bulk->b_link, &desc->b_page_list);
                desc->b_page_count++;
                atomic_inc(&desc->b_pages_remaining);
        }
        return bulk;
}

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;
        if (desc == NULL) {
                EXIT;
                return;
        }

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->b_connection);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        ENTRY;
        if (bulk == NULL) {
                EXIT;
                return;
        }

        list_del(&bulk->b_link);
        bulk->b_desc->b_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
        EXIT;
}

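/* Allocate and pack a new request for client cl on connection conn.  The
 * request starts with a refcount of 2 (dropped once in ptlrpc_req_finished()
 * and once in ptlrpc_free_committed()) and is stamped with the connection's
 * next XID.
 *
 * A minimal caller sketch (illustrative only -- MYOPC and struct mybody are
 * placeholders, and lustre_msg_size() is assumed to be the reply-size helper
 * from the message packing code, not something defined in this file):
 *
 *      int size = sizeof(struct mybody), rc;
 *      struct ptlrpc_request *req;
 *
 *      req = ptlrpc_prep_req(cl, conn, MYOPC, 1, &size, NULL);
 *      if (!req)
 *              return -ENOMEM;
 *      req->rq_replen = lustre_msg_size(1, &size);
 *      rc = ptlrpc_queue_wait(req);
 *      rc = ptlrpc_check_status(req, rc);
 *      ptlrpc_req_finished(req);
 */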
struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl,
                                       struct ptlrpc_connection *conn,
                                       int opcode, int count, int *lengths,
                                       char **bufs)
{
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_type = PTL_RPC_TYPE_REQUEST;
        request->rq_client = cl;
        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        INIT_LIST_HEAD(&request->rq_multi);
        /* this will be dec()d once in req_finished, once in free_committed */
        atomic_set(&request->rq_refcount, 2);

        spin_lock(&conn->c_lock);
        request->rq_xid = HTON__u32(++conn->c_xid_out);
        spin_unlock(&conn->c_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->type = HTON__u32(PTL_RPC_MSG_REQUEST);

        RETURN(request);
}

struct ptlrpc_request *ptlrpc_prep_req2(struct ptlrpc_client *cl,
                                        struct ptlrpc_connection *conn,
                                        struct lustre_handle *handle,
                                        int opcode, int count, int *lengths,
                                        char **bufs)
{
        struct ptlrpc_request *req;
        req = ptlrpc_prep_req(cl, conn, opcode, count, lengths, bufs);
        if (req != NULL)
                ptlrpc_hdl2req(req, handle);
        return req;
}

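/* Drop the caller's reference on a request.  The reply buffer is released
 * immediately; the request itself is freed only when the last reference
 * (see ptlrpc_free_committed()) goes away. */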
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }

        if (atomic_dec_and_test(&request->rq_refcount))
                ptlrpc_free_req(request);
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (request->rq_repmsg != NULL)
                OBD_FREE(request->rq_repmsg, request->rq_replen);
        if (request->rq_reqmsg != NULL)
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);

        if (request->rq_client) {
                spin_lock(&request->rq_client->cli_lock);
                list_del_init(&request->rq_list);
                spin_unlock(&request->rq_client->cli_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        list_del(&request->rq_multi);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

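/* Wait condition for ptlrpc_queue_wait()/ptlrpc_replay_req(): returns
 * non-zero when the waiter should wake up -- a reply has arrived, the
 * request was marked for resend or restart, it has timed out, or a fatal
 * signal (SIGKILL/SIGTERM/SIGINT) is pending. */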
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repmsg != NULL) {
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                CERROR("-- RESEND --\n");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RECOVERY) {
                CERROR("-- RESTART --\n");
                GOTO(out, rc = 1);
        }

        if (CURRENT_TIME - req->rq_time >= req->rq_timeout) {
                CERROR("-- REQ TIMEOUT ON CONNID %d XID %Ld --\n",
                       req->rq_connid, (unsigned long long)req->rq_xid);
                /* clear the timeout */
                req->rq_timeout = 0;
                req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
                req->rq_flags |= PTL_RPC_FL_TIMEOUT;
                if (req->rq_client && req->rq_client->cli_recovd)
                        recovd_cli_fail(req->rq_client);
                if (req->rq_level < LUSTRE_CONN_FULL)
                        rc = 1;
                else
                        rc = 0;
                GOTO(out, rc);
        }

        if (req->rq_timeout) {
                schedule_timeout(req->rq_timeout * HZ);
        }

        if (sigismember(&(current->pending.signal), SIGKILL) ||
            sigismember(&(current->pending.signal), SIGTERM) ||
            sigismember(&(current->pending.signal), SIGINT)) {
                req->rq_flags |= PTL_RPC_FL_INTR;
                GOTO(out, rc = 1);
        }

 out:
        CDEBUG(D_NET, "req = %p, rc = %d\n", req, rc);
        return rc;
}

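/* Sanity-check a completed request: err is the send/wait return code; the
 * reply's type and status fields are then checked.  Returns 0 on success or
 * an error code. */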
int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
                RETURN(-EINVAL);
        }

        if (req->rq_repmsg->status != 0) {
                if (req->rq_repmsg->status < 0)
                        CERROR("req->rq_repmsg->status is %d\n",
                               req->rq_repmsg->status);
                else
                        CDEBUG(D_INFO, "req->rq_repmsg->status is %d\n",
                               req->rq_repmsg->status);
                /* XXX: translate this error from net to host */
                RETURN(req->rq_repmsg->status);
        }

        RETURN(0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and cleanup any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

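/* Walk the sending list and release requests whose transno the server has
 * committed (transno <= cli_last_committed).  Requests marked
 * PTL_RPC_FL_REPLAY are retained for replay; committed requests that are
 * still referenced elsewhere are moved to the dying list instead of being
 * freed. */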
/* caller must lock cli */
void ptlrpc_free_committed(struct ptlrpc_client *cli)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;

        list_for_each_safe(tmp, saved, &cli->cli_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        CDEBUG(D_INFO, "Retaining request %Ld for replay\n",
                               req->rq_xid);
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > cli->cli_last_committed)
                        break;

                CDEBUG(D_INFO, "Marking request %Ld as committed ("
                       "transno=%Lu, last_committed=%Lu)\n",
                       req->rq_xid, req->rq_transno,
                       cli->cli_last_committed);
                if (atomic_dec_and_test(&req->rq_refcount)) {
                        /* we do this to prevent free_req deadlock */
                        list_del_init(&req->rq_list);
                        req->rq_client = NULL;
                        ptlrpc_free_req(req);
                } else {
                        list_del_init(&req->rq_list);
                        list_add(&req->rq_list, &cli->cli_dying_head);
                }
        }

        EXIT;
        return;
}

void ptlrpc_cleanup_client(struct ptlrpc_client *cli)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        ENTRY;

        spin_lock(&cli->cli_lock);
        list_for_each_safe(tmp, saved, &cli->cli_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                CDEBUG(D_INFO, "Cleaning req %p from sending list.\n", req);
                list_del_init(&req->rq_list);
                req->rq_client = NULL;
                ptlrpc_free_req(req);
        }
        list_for_each_safe(tmp, saved, &cli->cli_dying_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                CERROR("Request %p is on the dying list at cleanup!\n", req);
                list_del_init(&req->rq_list);
                req->rq_client = NULL;
                ptlrpc_free_req(req);
        }
        spin_unlock(&cli->cli_lock);

        EXIT;
        return;
}

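/* The three helpers below are used during recovery to nudge a request that
 * is blocked in ptlrpc_queue_wait(): continue a delayed request, mark one
 * for resend, or restart a completed one.  Each wakes the waiter on
 * rq_wait_for_rep. */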
void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_INODE, "continue delayed request %Ld opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        wake_up_interruptible(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_INODE, "resend request %Ld, opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up_interruptible(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_INODE, "restart completed request %Ld, opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RECOVERY;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up_interruptible(&req->rq_wait_for_rep);
        EXIT;
}

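/* Send a request and sleep until the reply arrives (or the request times
 * out, is interrupted, or is marked for resend).  If the request's level is
 * above the connection's current level, the caller is parked on the delayed
 * list until recovery raises the connection level.  On success the client's
 * last_rcvd and last_committed are updated and committed requests are freed. */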
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct ptlrpc_client *cli = req->rq_client;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "subsys: %s req %Ld opc %d level %d, conn level %d\n",
               cli->cli_name, req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        /* XXX probably both an import and connection level are needed */
        if (req->rq_level > req->rq_connection->c_level) {
                CERROR("process %d waiting for recovery\n", current->pid);
                spin_lock(&cli->cli_lock);
                list_del_init(&req->rq_list);
                list_add(&req->rq_list, cli->cli_delayed_head.prev);
                spin_unlock(&cli->cli_lock);
                wait_event_interruptible
                        (req->rq_wait_for_rep,
                         req->rq_level <= req->rq_connection->c_level);
                spin_lock(&cli->cli_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&cli->cli_lock);
                CERROR("process %d resumed\n", current->pid);
        }
 resend:
        req->rq_time = CURRENT_TIME;
        req->rq_timeout = 100;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                if (rc > 0)
                        rc = -rc;
                ptlrpc_cleanup_request_buf(req);
                up(&cli->cli_rpc_sem);
                RETURN(-rc);
        }

        spin_lock(&cli->cli_lock);
        list_del_init(&req->rq_list);
        list_add_tail(&req->rq_list, &cli->cli_sending_head);
        spin_unlock(&cli->cli_lock);

        CDEBUG(D_OTHER, "-- sleeping\n");
        wait_event_interruptible(req->rq_wait_for_rep,
                                 ptlrpc_check_reply(req));
        CDEBUG(D_OTHER, "-- done\n");

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                goto resend;
        }

        up(&cli->cli_rpc_sem);
        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (req->rq_flags & PTL_RPC_FL_INTR) {
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
        CDEBUG(D_NET, "got rep %Ld\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        spin_lock(&cli->cli_lock);
        cli->cli_last_rcvd = req->rq_repmsg->last_rcvd;
        cli->cli_last_committed = req->rq_repmsg->last_committed;
        ptlrpc_free_committed(cli);
        spin_unlock(&cli->cli_lock);

        EXIT;
 out:
        return rc;
}

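/* Resend a retained request during recovery, bypassing the delayed-list and
 * resend handling of ptlrpc_queue_wait().  A non-zero reply status during
 * replay is treated as a fatal recovery failure (LBUG). */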
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0;
        struct ptlrpc_client *cli = req->rq_client;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "req %Ld opc %d level %d, conn level %d\n",
               req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        req->rq_time = CURRENT_TIME;
        req->rq_timeout = 100;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                up(&cli->cli_rpc_sem);
                RETURN(-rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        wait_event_interruptible(req->rq_wait_for_rep,
                                 ptlrpc_check_reply(req));
        CDEBUG(D_OTHER, "-- done\n");

        up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep %Ld\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);
        else {
                CERROR("recovery failed: req %Ld opc %d level %d, "
                       "conn level %d\n",
                       req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
                       req->rq_connection->c_level);
                LBUG();
        }

 out:
        RETURN(rc);
}