lustre/ptlrpc/niobuf.c

/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>

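/* Event queues for the five event streams this file generates; they are
 * declared here and defined in the ptlrpc event-handling code (assumption:
 * events.c), where the corresponding completion callbacks run. */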
extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;
static ptl_process_id_t local_id = {PTL_NID_ANY, PTL_PID_ANY};

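/* Bind a memory descriptor over the outgoing request or reply buffer and
 * PtlPut() it to the given portal on the peer, using the request xid as the
 * match bits.  No ACK is requested; completion is observed via the
 * request_out_eq/reply_out_eq event queues. */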
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %Ld\n",
               request->rq_req_md.length, portal, request->rq_xid);

        if (!portal)
                LBUG();
        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %Ld) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

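/* Helpers to get/put the iovec used to describe a bulk transfer: small
 * descriptors reuse the array embedded in the descriptor (bd_iov); larger
 * ones allocate a temporary array sized to bd_page_count. */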
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                LBUG();

        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

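/* Source side of a bulk transfer: gather the pages on bd_page_list into an
 * iovec, bind it as a single MD, and PtlPut() it to the peer's bulk portal
 * with the common xid as match bits.  The threshold is 2 because both the
 * SENT and the ACK events must fire before the MD is exhausted. */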
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* could move below PtlPut() to shave
                                          * latency off the send path */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid %Lx "
               "pid %d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

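/* Sink side of a bulk transfer: attach a match entry on the bulk portal that
 * accepts a PUT from the peer's nid with the common xid as match bits, then
 * attach an MD describing the sink pages.  Both are PTL_UNLINK so they are
 * torn down automatically once the transfer completes. */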
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        ptl_process_id_t source_id;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;
        desc->bd_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        /* don't release the iov until the MD that refers to it via
         * bd_md.start has been attached */
        ptlrpc_put_bulk_iov (desc, iov);

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        RETURN(0);

 cleanup:
        /* the MD was never attached, so the iov is still ours to free */
        ptlrpc_put_bulk_iov (desc, iov);
        ptlrpc_abort_bulk(desc);

        RETURN(rc);
}

int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

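/* Send the reply that has been packed into req->rq_repmsg back to the portal
 * the client is listening on, carrying the (byte-swapped) status.  Callers
 * that have no reply message to send should use ptlrpc_error() instead. */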
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

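/* Pack an empty reply, mark it PTL_RPC_MSG_ERR and send it: used to tell the
 * client that request processing failed before a real reply could be
 * built. */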
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                RETURN(rc);

        req->rq_repmsg->type = HTON__u32(PTL_RPC_MSG_ERR);

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}

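/* Client side of an RPC: post the buffer the reply will land in (an ME on
 * the client's reply portal matching this xid, plus an MD over a freshly
 * allocated reply buffer) *before* the request goes out, so the reply cannot
 * race against the setup, then send the request itself via ptl_send_buf(). */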
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        ENTRY;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                RETURN(-EINVAL);
        }
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                RETURN(-EINVAL);
        }

        /* request->rq_repmsg is set only when the reply comes in, in
         * client_packet_callback() */
        if (request->rq_reply_md.start)
                OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) {
                LBUG();
                RETURN(-ENOMEM);
        }

        // down(&request->rq_client->cli_rpc_sem);

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                         request->rq_import->imp_client->cli_reply_portal,
                         source_id, request->rq_xid, 0, PTL_UNLINK,
                         PTL_INS_AFTER, &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = reply_in_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup2, rc);
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %Lu, portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_import->imp_client->cli_reply_portal);

        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_import->imp_client->cli_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        RETURN(rc);
}

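/* (Re)link incoming-request buffer i into the service's ring: attach a
 * wildcard ME (any nid/pid, all match bits ignored) on the request portal
 * and an auto-unlinking MD over the i'th ring buffer. */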
void ptlrpc_link_svc_me(struct ptlrpc_service *service, int i)
{
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         local_id, 0, ~0, PTL_RETAIN, PTL_INS_BEFORE,
                         &(service->srv_me_h[i]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        if (service->srv_ref_count[i])
                LBUG();

        dummy.start         = service->srv_buf[i];
        dummy.length        = service->srv_buf_size;
        dummy.max_offset    = service->srv_buf_size;
        dummy.threshold     = PTL_MD_THRESH_INF;
        dummy.options       = PTL_MD_OP_PUT | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr      = service;
        dummy.eventq        = service->srv_eq_h;

        rc = PtlMDAttach(service->srv_me_h[i], dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                /* cleanup */
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
        }
}

/* ptl_handled_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_handled_rpc(struct ptlrpc_service *service, void *start)
{
        int index;

        spin_lock(&service->srv_lock);
        for (index = 0; index < service->srv_ring_length; index++)
                if (service->srv_buf[index] == start)
                        break;

        if (index == service->srv_ring_length)
                LBUG();

        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if (service->srv_ref_count[index] < 0)
                LBUG();

        if (service->srv_ref_count[index] == 0 &&
            !ptl_is_valid_handle(&(service->srv_me_h[index]))) {
                CDEBUG(D_NET, "relinking %d\n", index);
                ptlrpc_link_svc_me(service, index);
        }

        spin_unlock(&service->srv_lock);
        return 0;
}
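
/* Typical caller pattern (illustrative sketch only -- handle_one_event() and
 * dispatch_request() are hypothetical, not part of this file): a service
 * thread processes a request that landed in one of the ring buffers and then
 * hands the buffer back so the slot can be relinked once its refcount
 * drains:
 *
 *      static int handle_one_event(struct ptlrpc_service *svc,
 *                                  ptl_event_t *ev)
 *      {
 *              int rc = dispatch_request(svc, ev);     // hypothetical
 *
 *              // ev->mem_desc.start is the ring buffer the request
 *              // arrived in; returning it may relink the ME/MD pair
 *              ptl_handled_rpc(svc, ev->mem_desc.start);
 *              return rc;
 *      }
 */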