/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>

#ifdef  __CYGWIN__
# include <ctype.h>
#endif

#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include "osc_internal.h"

static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc);
static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli,
                            int ptlrpc);
int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;
        ENTRY;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                RETURN(lmm_size);

        if (*lmmp && !lsm) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (!*lmmp) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (!*lmmp)
                        RETURN(-ENOMEM);
        }

        if (lsm) {
                LASSERT(lsm->lsm_object_id);
                LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
        }

        RETURN(lmm_size);
}
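
/*
 * Note: osc_packmd() follows the usual obd pack convention and has three
 * calling modes, selected by its arguments:
 *   - lmmp == NULL:               return the wire metadata size only;
 *   - *lmmp != NULL, lsm == NULL: free a previously packed buffer;
 *   - otherwise:                  (allocate and) fill *lmmp from lsm.
 * A minimal caller sketch (local names here are illustrative, not from
 * this file):
 *
 *      struct lov_mds_md *lmm = NULL;
 *      int size = osc_packmd(exp, NULL, lsm);  // size query only
 *      rc = osc_packmd(exp, &lmm, lsm);        // allocate and pack
 *      ...
 *      osc_packmd(exp, &lmm, NULL);            // free again
 */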

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        struct obd_import *imp = class_exp2cliimp(exp);
        ENTRY;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                if ((*lsmp)->lsm_oinfo[0] == NULL) {
                        OBD_FREE(*lsmp, lsm_size);
                        RETURN(-ENOMEM);
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
                (*lsmp)->lsm_object_seq = le64_to_cpu(lmm->lmm_object_seq);
                LASSERT((*lsmp)->lsm_object_id);
                LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
        }

        if (imp != NULL &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}

static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
{
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        if (!capa)
                return;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        LASSERT(c);
        capa_cpy(c, oc);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");
}

static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);
}

static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
                                     struct obd_capa *oc)
{
        if (oc == NULL)
                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        else
                /* it is already calculated as sizeof struct obd_capa */
                ;
}

static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}
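
/*
 * A hedged usage sketch for the async getattr path: the caller owns a
 * ptlrpc_request_set, queues one or more getattrs against it and waits
 * for the whole set; oi_cb_up() is invoked from osc_getattr_interpret()
 * for each reply.  Illustrative only; "exp" and "oinfo" are assumed to
 * be set up by the caller:
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      rc = osc_getattr_async(exp, oinfo, set);
 *      if (rc == 0)
 *              rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 */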

static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        /* This should really be sent by the OST */
        oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
 out:
        ptlrpc_req_finished(req);
        return rc;
}

static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
                       struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(sa->sa_oa, &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
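
/*
 * Note on dispatch: osc_setattr_async_base() supports three completion
 * models, all visible above:
 *   - rqset == NULL:        fire-and-forget through a ptlrpcd thread,
 *                           no interpret callback, errors are ignored;
 *   - rqset == PTLRPCD_SET: handled by ptlrpcd, but with the
 *                           osc_setattr_interpret() upcall wired in;
 *   - otherwise:            queued on the caller's request set, which
 *                           the caller must wait on itself.
 */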

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}

int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct lov_stripe_md  *lsm;
        int                    rc;
        ENTRY;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        lustre_get_wire_obdo(oa, &body->oa);

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way.
         */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_seq = oa->o_seq;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        *oti->oti_logcookies = oa->o_lcookie;
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}

int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        struct ost_body         *body;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa     = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
                     struct obd_trans_info *oti,
                     struct ptlrpc_request_set *rqset)
{
        oinfo->oi_oa->o_size   = oinfo->oi_policy.l_extent.start;
        oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
        oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        return osc_punch_base(exp, oinfo,
                              oinfo->oi_cb_up, oinfo, rqset);
}
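
/*
 * As with osc_sync() below, the punch extent is carried in the obdo by
 * overloading o_size (start) and o_blocks (end); OBD_MD_FLSIZE and
 * OBD_MD_FLBLOCKS mark the two fields as valid.  For an ordinary
 * truncate to N bytes the caller would pass an extent of
 * [N, OBD_OBJECT_EOF] (stated here as background; the callers are not
 * in this excerpt).
 */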

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_async_args *aa = arg;
        struct ost_body *body;
        ENTRY;

        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *aa->aa_oi->oi_oa = body->oa;
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
                    obd_size start, obd_size end,
                    struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        if (!oinfo->oi_oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   cfs_list_t *cancels,
                                   ldlm_mode_t mode, int lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        cfs_atomic_dec(&cli->cl_destroy_in_flight);
        cfs_waitq_signal(&cli->cl_destroy_waitq);
        return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                cfs_waitq_signal(&cli->cl_destroy_waitq);
        }
        return 0;
}
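
/*
 * How the destroy throttle above works (lock-free): a sender
 * optimistically bumps cl_destroy_in_flight; if that pushes the count
 * past cl_max_rpcs_in_flight the increment is undone and the caller
 * must wait.  Because another thread may have completed its RPC between
 * the inc and the dec, the dec re-checks the count and wakes
 * cl_destroy_waitq so no waiter is stranded.  The waiting side pairs
 * with this in osc_destroy() below via l_wait_event_exclusive().
 */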

/* Destroy requests can be async always on the client, and we don't even really
 * care about the return code since the client cannot do anything at all about
 * a destroy failure.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST reports
 * they were destroyed and sync'd to disk (i.e. transaction committed).
 * If the client dies, or the OST is down when the object should be destroyed,
 * the records are not cancelled, and when the OST reconnects to the MDS next,
 * it will retrieve the llog unlink logs and then send the log cancellation
 * cookies to the MDS after committing destroy transactions. */
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti,
                       struct obd_export *md_export, void *capa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        CFS_LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        osc_pack_capa(req, body, (struct obd_capa *)capa);
        ptlrpc_request_set_replen(req);

        /* don't throttle destroy RPCs for the MDT */
        if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of on-going destroy RPCs drops
                         * below cl_max_rpcs_in_flight.
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli), &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else if (cfs_atomic_read(&obd_dirty_pages) -
                   cfs_atomic_read(&obd_dirty_transit_pages) >
                   obd_max_dirty_pages + 1) {
                /* The cfs_atomic_read() and cfs_atomic_inc() here are not
                 * covered by a lock, so they may race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("dirty %d - %d > system dirty_max %d\n",
                       cfs_atomic_read(&obd_dirty_pages),
                       cfs_atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else {
                long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) *
                                     (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

static void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
{
        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        cfs_atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += CFS_PAGE_SIZE;
        cli->cl_avail_grant -= CFS_PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
               CFS_PAGE_SIZE, pga, pga->pg);
        LASSERT(cli->cl_avail_grant >= 0);
        osc_update_next_shrink(cli);
}

/* the companion to osc_consume_write_grant, called when a brw has completed.
 * must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga, int sent)
{
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
        ENTRY;

        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
        }

        pga->flag &= ~OBD_BRW_FROM_GRANT;
        cfs_atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= CFS_PAGE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
                cfs_atomic_dec(&obd_dirty_transit_pages);
                cli->cl_dirty_transit -= CFS_PAGE_SIZE;
        }
        if (!sent) {
                /* Reclaim grant from truncated pages. This is used to solve
                 * the write-truncate and "grant all gone (to lost_grant)"
                 * problems. For a vfs write this can easily be solved by a
                 * sync write; however, that is not an option for
                 * page_mkwrite() because grant has to be allocated before a
                 * page becomes dirty. */
                if (cli->cl_avail_grant < PTLRPC_MAX_BRW_SIZE)
                        cli->cl_avail_grant += CFS_PAGE_SIZE;
                else
                        cli->cl_lost_grant += CFS_PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole block on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check. */
                int offset = pga->off & ~CFS_PAGE_MASK;
                int count = pga->count + (offset & (blocksize - 1));
                int end = (offset + pga->count) & (blocksize - 1);
                if (end)
                        count += blocksize - end;

                cli->cl_lost_grant += CFS_PAGE_SIZE - count;
                CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
                       CFS_PAGE_SIZE - count, cli->cl_lost_grant,
                       cli->cl_avail_grant, cli->cl_dirty);
        }

        EXIT;
}
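
/*
 * Worked example for the short-write branch above (illustrative
 * numbers): with a 4096-byte client page, an OST blocksize of 1024 and
 * a sent page carrying count = 600 at offset 0, the write still
 * consumes one full 1024-byte block on the OST: count = 600,
 * end = 600 & 1023 = 600, so count is rounded up to 1024 and
 * cl_lost_grant grows by 4096 - 1024 = 3072 bytes, matching what
 * filter_grant_check charges on the server side.
 */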

static unsigned long rpcs_in_flight(struct client_obd *cli)
{
        return cli->cl_r_in_flight + cli->cl_w_in_flight;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        cfs_list_t *l, *tmp;
        struct osc_cache_waiter *ocw;

        ENTRY;
        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
                    (cfs_atomic_read(&obd_dirty_pages) + 1 >
                     obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);
                        return;
                }

                /* if there is still dirty cache but no grant, wait for
                 * pending RPCs that may yet return us some grant before
                 * doing sync writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);
                        return;
                }

                ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
                cfs_list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli,
                                                &ocw->ocw_oap->oap_brw_page);
                }

                CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld\n",
                       ocw, ocw->ocw_oap, cli->cl_avail_grant);

                cfs_waitq_signal(&ocw->ocw_waitq);
        }

        EXIT;
}
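
/*
 * The waiter side of this handshake lives in the cache-entry path, which
 * is not part of this excerpt.  A hedged sketch of the expected pattern,
 * assuming only the osc_cache_waiter fields used above (ocw_entry,
 * ocw_oap, ocw_rc, ocw_waitq):
 *
 *      struct osc_cache_waiter ocw;
 *
 *      cfs_waitq_init(&ocw.ocw_waitq);
 *      ocw.ocw_oap = oap;
 *      ocw.ocw_rc  = 0;
 *      cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
 *      // ...drop the list lock, kick writeback, then sleep until
 *      // osc_wake_cache_waiters() has unlinked and signalled us...
 *      l_wait_event(ocw.ocw_waitq, cfs_list_empty(&ocw.ocw_entry), &lwi);
 *      rc = ocw.ocw_rc;        // -EDQUOT means: fall back to sync IO
 */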

static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
                              void *key, obd_count vallen, void *val,
                              struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBDO_FREE(oa);
        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        long target = (cli->cl_max_rpcs_in_flight + 1) *
                      cli->cl_max_pages_per_rpc;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target)
                target = cli->cl_max_pages_per_rpc;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target);
}
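
/*
 * Example of the two-step shrink policy (illustrative numbers): with
 * cl_max_rpcs_in_flight = 8, the first shrink targets
 * (8 + 1) * cl_max_pages_per_rpc worth of grant; once cl_avail_grant is
 * already at or below that, the target drops to a single RPC's worth,
 * cl_max_pages_per_rpc.  osc_shrink_grant_to_target() below then clamps
 * the target and sends the excess back via a KEY_GRANT_SHRINK set_info.
 */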

int osc_shrink_grant_to_target(struct client_obd *cli, long target)
{
        int    rc = 0;
        struct ost_body     *body;
        ENTRY;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target < cli->cl_max_pages_per_rpc)
                target = cli->cl_max_pages_per_rpc;

        if (target >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target;
        cli->cl_avail_grant = target;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
        OBD_FREE_PTR(body);
        RETURN(rc);
}

#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
static int osc_should_shrink_grant(struct client_obd *client)
{
        cfs_time_t time = cfs_time_current();
        cfs_time_t next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > GRANT_SHRINK_LIMIT)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        cfs_list_for_each_entry(client, &item->ti_obd_list,
                                cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}

static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                       client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s\n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}

static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}

static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it is the new avail_grant amount, and cl_dirty will
         * drop to 0 as in-flight RPCs fail out; otherwise, it is
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we are evicted but imp_state has
         * already left the EVICTED state, then cl_dirty must be 0 already.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0, the OSS is probably not running"
                      " with the patch from bug 20278 (%ld)\n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
                /* workaround for 1.6 servers which do not have
                 * the patch from bug 20278 */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
               cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            cfs_list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}

/* We assume that this OSC got a short read because it read beyond the end
 * of a stripe file; i.e. Lustre is reading a sparse file via the LOV and
 * _knows_ it is reading inside the file; it is just that this stripe was
 * never written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = cfs_kmap(pga[i]->pg) +
                                (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        cfs_kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                cfs_kunmap(pga[i]->pg);
                i++;
        }
}
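
/*
 * Example (illustrative numbers): a three-page read of 4096-byte pages
 * that returns nob_read = 5000.  Page 0 (count 4096) is fully valid, so
 * the first loop skips it leaving nob_read = 904; EOF falls inside
 * page 1, so bytes 904..4095 of it are zeroed; page 2 was not
 * transferred at all and is zeroed in full by the second loop.
 */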

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
{
        int     i;
        __u32  *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return remote_rcs[i];

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC | OBD_BRW_ASYNC | OBD_BRW_NOQUOTA);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at http://bugs.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}
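
/*
 * Merge example: pages at (off 0, count 4096) and (off 4096, count 4096)
 * with identical flags collapse into a single 8192-byte niobuf in
 * osc_brw_prep_request() below.  Any flag mismatch prevents the merge
 * outright; the mask above only decides whether the mismatch involves
 * bits outside the known-harmless set and is therefore worth a CWARN.
 */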

static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;

        LASSERT(pg_count > 0);
        cksum = init_checksum(cksum_type);
        while (nob > 0 && pg_count > 0) {
                unsigned char *ptr = cfs_kmap(pga[i]->pg);
                int off = pga[i]->off & ~CFS_PAGE_MASK;
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        memcpy(ptr + off, "bad1", min(4, nob));
                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                cfs_kunmap(pga[i]->pg);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                               off, cksum);

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        /* When sending we only compute a wrong checksum instead of
         * corrupting the data, so the data is still correct on a resend */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return fini_checksum(cksum, cksum_type);
}

static int osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa, int reserve,
                                int resend)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule      *pill;
        struct brw_page *pg_prev;

        ENTRY;
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_PUT_SINK, OST_BULK_PORTAL);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == CFS_PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: "LPU64", count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
#else
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u\n", i, page_count);
#endif
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len    = pg->count;
                        niobuf->flags  = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        if (ocapa && reserve)
                aa->aa_ocapa = capa_get(ocapa);

        *reqp = req;
        RETURN(0);

 out:
        ptlrpc_req_finished(req);
1482         RETURN(rc);
1483 }
1484
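/* Compare the client-computed and server-reported checksums for a bulk write.
 * Returns 0 if they match.  Otherwise the checksum is recomputed locally to
 * help classify where the corruption happened, a detailed error is logged,
 * and 1 is returned. */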
1485 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1486                                 __u32 client_cksum, __u32 server_cksum, int nob,
1487                                 obd_count page_count, struct brw_page **pga,
1488                                 cksum_type_t client_cksum_type)
1489 {
1490         __u32 new_cksum;
1491         char *msg;
1492         cksum_type_t cksum_type;
1493
1494         if (server_cksum == client_cksum) {
1495                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1496                 return 0;
1497         }
1498
1499         cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1500                                        oa->o_flags : 0);
1501         new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1502                                       cksum_type);
1503
1504         if (cksum_type != client_cksum_type)
1505                 msg = "the server did not use the checksum type specified in "
1506                       "the original request - likely a protocol problem";
1507         else if (new_cksum == server_cksum)
1508                 msg = "changed on the client after we checksummed it - "
1509                       "likely false positive due to mmap IO (bug 11742)";
1510         else if (new_cksum == client_cksum)
1511                 msg = "changed in transit before arrival at OST";
1512         else
1513                 msg = "changed in transit AND doesn't match the original - "
1514                       "likely false positive due to mmap IO (bug 11742)";
1515
1516         LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1517                            " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
1518                            msg, libcfs_nid2str(peer->nid),
1519                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1520                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1521                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1522                            oa->o_id,
1523                            oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
1524                            pga[0]->off,
1525                            pga[page_count-1]->off + pga[page_count-1]->count - 1);
1526         CERROR("original client csum %x (type %x), server csum %x (type %x), "
1527                "client csum now %x\n", client_cksum, client_cksum_type,
1528                server_cksum, cksum_type, new_cksum);
1529         return 1;
1530 }
1531
1532 /* Note: rc enters this function as the number of bytes transferred. */
1533 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1534 {
1535         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1536         const lnet_process_id_t *peer =
1537                         &req->rq_import->imp_connection->c_peer;
1538         struct client_obd *cli = aa->aa_cli;
1539         struct ost_body *body;
1540         __u32 client_cksum = 0;
1541         ENTRY;
1542
1543         if (rc < 0 && rc != -EDQUOT) {
1544                 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d", rc);
1545                 RETURN(rc);
1546         }
1547
1548         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1549         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1550         if (body == NULL) {
1551                 DEBUG_REQ(D_INFO, req, "Can't unpack body");
1552                 RETURN(-EPROTO);
1553         }
1554
1555         /* set/clear over quota flag for a uid/gid */
1556         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1557             body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1558                 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1559
1560                 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1561                        body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1562                        body->oa.o_flags);
1563                 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1564         }
1565
1566         osc_update_grant(cli, body);
1567
1568         if (rc < 0)
1569                 RETURN(rc);
1570
1571         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1572                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1573
1574         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1575                 if (rc > 0) {
1576                         CERROR("Unexpected +ve rc %d\n", rc);
1577                         RETURN(-EPROTO);
1578                 }
1579                 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1580
1581                 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1582                         RETURN(-EAGAIN);
1583
1584                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1585                     check_write_checksum(&body->oa, peer, client_cksum,
1586                                          body->oa.o_cksum, aa->aa_requested_nob,
1587                                          aa->aa_page_count, aa->aa_ppga,
1588                                          cksum_type_unpack(aa->aa_oa->o_flags)))
1589                         RETURN(-EAGAIN);
1590
1591                 rc = check_write_rcs(req, aa->aa_requested_nob,
1592                                      aa->aa_nio_count, aa->aa_page_count, aa->aa_ppga);
1593                 GOTO(out, rc);
1594         }
1595
1596         /* The rest of this function executes only for OST_READs */
1597
1598         /* if unwrap_bulk failed, return -EAGAIN to retry */
1599         rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1600         if (rc < 0)
1601                 GOTO(out, rc = -EAGAIN);
1602
1603         if (rc > aa->aa_requested_nob) {
1604                 CERROR("Unexpected rc %d (%d requested)\n", rc,
1605                        aa->aa_requested_nob);
1606                 RETURN(-EPROTO);
1607         }
1608
1609         if (rc != req->rq_bulk->bd_nob_transferred) {
1610                 CERROR("Unexpected rc %d (%d transferred)\n",
1611                        rc, req->rq_bulk->bd_nob_transferred);
1612                 RETURN(-EPROTO);
1613         }
1614
1615         if (rc < aa->aa_requested_nob)
1616                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1617
1618         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1619                 static int cksum_counter;
1620                 __u32      server_cksum = body->oa.o_cksum;
1621                 char      *via;
1622                 char      *router;
1623                 cksum_type_t cksum_type;
1624
1625                 cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
1626                                                body->oa.o_flags : 0);
1627                 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1628                                                  aa->aa_ppga, OST_READ,
1629                                                  cksum_type);
1630
1631                 if (peer->nid == req->rq_bulk->bd_sender) {
1632                         via = router = "";
1633                 } else {
1634                         via = " via ";
1635                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1636                 }
1637
1638                 if (server_cksum == ~0 && rc > 0) {
1639                         CERROR("Protocol error: server %s set the 'checksum' "
1640                                "bit, but didn't send a checksum.  Not fatal, "
1641                                "but please notify on http://bugs.whamcloud.com/\n",
1642                                libcfs_nid2str(peer->nid));
1643                 } else if (server_cksum != client_cksum) {
1644                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1645                                            "%s%s%s inode "DFID" object "
1646                                            LPU64"/"LPU64" extent "
1647                                            "["LPU64"-"LPU64"]\n",
1648                                            req->rq_import->imp_obd->obd_name,
1649                                            libcfs_nid2str(peer->nid),
1650                                            via, router,
1651                                            body->oa.o_valid & OBD_MD_FLFID ?
1652                                                 body->oa.o_parent_seq : (__u64)0,
1653                                            body->oa.o_valid & OBD_MD_FLFID ?
1654                                                 body->oa.o_parent_oid : 0,
1655                                            body->oa.o_valid & OBD_MD_FLFID ?
1656                                                 body->oa.o_parent_ver : 0,
1657                                            body->oa.o_id,
1658                                            body->oa.o_valid & OBD_MD_FLGROUP ?
1659                                                 body->oa.o_seq : (__u64)0,
1660                                            aa->aa_ppga[0]->off,
1661                                            aa->aa_ppga[aa->aa_page_count-1]->off +
1662                                            aa->aa_ppga[aa->aa_page_count-1]->count -
1663                                                                         1);
1664                         CERROR("client %x, server %x, cksum_type %x\n",
1665                                client_cksum, server_cksum, cksum_type);
1666                         cksum_counter = 0;
1667                         aa->aa_oa->o_cksum = client_cksum;
1668                         rc = -EAGAIN;
1669                 } else {
1670                         cksum_counter++;
1671                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1672                         rc = 0;
1673                 }
1674         } else if (unlikely(client_cksum)) {
1675                 static int cksum_missed;
1676
1677                 cksum_missed++;
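                /* x & -x == x only when x is a power of two, so this error
                 * message is logged at exponentially growing intervals. */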
1678                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1679                         CERROR("Checksum %u requested from %s but not sent\n",
1680                                cksum_missed, libcfs_nid2str(peer->nid));
1681         } else {
1682                 rc = 0;
1683         }
1684 out:
1685         if (rc >= 0)
1686                 lustre_get_wire_obdo(aa->aa_oa, &body->oa);
1687
1688         RETURN(rc);
1689 }
1690
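/* Synchronous bulk I/O: build a BRW request, queue it and wait, retrying on
 * recoverable errors.  -EINPROGRESS is retried without limit; other errors
 * are bounded by client_should_resend() and by the import generation. */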
1691 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1692                             struct lov_stripe_md *lsm,
1693                             obd_count page_count, struct brw_page **pga,
1694                             struct obd_capa *ocapa)
1695 {
1696         struct ptlrpc_request *req;
1697         int                    rc;
1698         cfs_waitq_t            waitq;
1699         int                    generation, resends = 0;
1700         struct l_wait_info     lwi;
1701
1702         ENTRY;
1703
1704         cfs_waitq_init(&waitq);
1705         generation = exp->exp_obd->u.cli.cl_import->imp_generation;
1706
1707 restart_bulk:
1708         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1709                                   page_count, pga, &req, ocapa, 0, resends);
1710         if (rc != 0)
1711                 RETURN(rc);
1712
1713         if (resends) {
1714                 req->rq_generation_set = 1;
1715                 req->rq_import_generation = generation;
1716         }
1717
1718         rc = ptlrpc_queue_wait(req);
1719
1720         if (rc == -ETIMEDOUT && req->rq_resend) {
1721                 DEBUG_REQ(D_HA, req,  "BULK TIMEOUT");
1722                 ptlrpc_req_finished(req);
1723                 goto restart_bulk;
1724         }
1725
1726         rc = osc_brw_fini_request(req, rc);
1727
1728         ptlrpc_req_finished(req);
1729         /* When the server returns -EINPROGRESS, the client should always
1730          * retry, regardless of how many times the bulk has been resent. */
1731         if (osc_recoverable_error(rc)) {
1732                 resends++;
1733                 if (rc != -EINPROGRESS &&
1734                     !client_should_resend(resends, &exp->exp_obd->u.cli)) {
1735                         CERROR("%s: too many resend retries for object: "
1736                                ""LPU64":"LPU64", rc = %d.\n",
1737                                exp->exp_obd->obd_name, oa->o_id, oa->o_seq, rc);
1738                         goto out;
1739                 }
1740                 if (generation !=
1741                     exp->exp_obd->u.cli.cl_import->imp_generation) {
1742                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
1743                                ""LPU64":"LPU64", rc = %d.\n",
1744                                exp->exp_obd->obd_name, oa->o_id, oa->o_seq, rc);
1745                         goto out;
1746                 }
1747
1748                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
1749                                        NULL);
1750                 l_wait_event(waitq, 0, &lwi);
1751
1752                 goto restart_bulk;
1753         }
1754 out:
1755         if (rc == -EAGAIN || rc == -EINPROGRESS)
1756                 rc = -EIO;
1757         RETURN(rc);
1758 }
1759
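/* Rebuild a bulk request after a recoverable error.  The new request takes
 * over the pga and oaps of the old one and is added to the same request set. */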
1760 int osc_brw_redo_request(struct ptlrpc_request *request,
1761                          struct osc_brw_async_args *aa)
1762 {
1763         struct ptlrpc_request *new_req;
1764         struct ptlrpc_request_set *set = request->rq_set;
1765         struct osc_brw_async_args *new_aa;
1766         struct osc_async_page *oap;
1767         int rc = 0;
1768         ENTRY;
1769
1770         DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1771
1772         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1773                                         OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1774                                   aa->aa_cli, aa->aa_oa,
1775                                   NULL /* lsm unused by osc currently */,
1776                                   aa->aa_page_count, aa->aa_ppga,
1777                                   &new_req, aa->aa_ocapa, 0, 1);
1778         if (rc)
1779                 RETURN(rc);
1780
1781         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1782
1783         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1784                 if (oap->oap_request != NULL) {
1785                         LASSERTF(request == oap->oap_request,
1786                                  "request %p != oap_request %p\n",
1787                                  request, oap->oap_request);
1788                         if (oap->oap_interrupted) {
1789                                 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1790                                 ptlrpc_req_finished(new_req);
1791                                 RETURN(-EINTR);
1792                         }
1793                 }
1794         }
1795         /* The new request takes over the pga and oaps of the old request.
1796          * Note that a list_head can't be copied; it must be spliced over. */
1797         aa->aa_resends++;
1798         new_req->rq_interpret_reply = request->rq_interpret_reply;
1799         new_req->rq_async_args = request->rq_async_args;
1800         new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1801         new_req->rq_generation_set = 1;
1802         new_req->rq_import_generation = request->rq_import_generation;
1803
1804         new_aa = ptlrpc_req_async_args(new_req);
1805
1806         CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1807         cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1808         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1809
1810         cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1811                 if (oap->oap_request) {
1812                         ptlrpc_req_finished(oap->oap_request);
1813                         oap->oap_request = ptlrpc_request_addref(new_req);
1814                 }
1815         }
1816
1817         new_aa->aa_ocapa = aa->aa_ocapa;
1818         aa->aa_ocapa = NULL;
1819
1820         /* Using ptlrpc_set_add_req() is safe because interpret functions
1821          * run in check_set context.  The only path by which another thread
1822          * can access this request is the -EINTR path, and that path is
1823          * protected by cl_loi_list_lock. */
1824         ptlrpc_set_add_req(set, new_req);
1825
1826         client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1827
1828         DEBUG_REQ(D_INFO, new_req, "new request");
1829         RETURN(0);
1830 }
1831
1832 /*
1833  * We want disk allocation on the target to happen in offset order, so sort
1834  * the pages first.  Following Sedgewick's advice, we stick to the dead
1835  * simple shellsort -- it does fine for our small page arrays and requires
1836  * no allocation.  It is an insertion sort that swaps elements that are a
1837  * stride apart, shrinking the stride until it is 1 and the array is sorted.
1838  */
1839 static void sort_brw_pages(struct brw_page **array, int num)
1840 {
1841         int stride, i, j;
1842         struct brw_page *tmp;
1843
1844         if (num == 1)
1845                 return;
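        /* Grow the stride through the sequence 1, 4, 13, 40, ... (h = 3h + 1)
         * until it reaches num, then shrink it by thirds on each pass. */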
1846         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1847                 ;
1848
1849         do {
1850                 stride /= 3;
1851                 for (i = stride ; i < num ; i++) {
1852                         tmp = array[i];
1853                         j = i;
1854                         while (j >= stride && array[j - stride]->off > tmp->off) {
1855                                 array[j] = array[j - stride];
1856                                 j -= stride;
1857                         }
1858                         array[j] = tmp;
1859                 }
1860         } while (stride > 1);
1861 }
1862
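/* Return how many leading pages of the sorted array can be sent as a single
 * unfragmented bulk: the run ends at the first page that does not end on a
 * page boundary, or at the first later page that does not start on one. */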
1863 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1864 {
1865         int count = 1;
1866         int offset;
1867         int i = 0;
1868
1869         LASSERT(pages > 0);
1870         offset = pg[i]->off & ~CFS_PAGE_MASK;
1871
1872         for (;;) {
1873                 pages--;
1874                 if (pages == 0)         /* that's all */
1875                         return count;
1876
1877                 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1878                         return count;   /* doesn't end on page boundary */
1879
1880                 i++;
1881                 offset = pg[i]->off & ~CFS_PAGE_MASK;
1882                 if (offset != 0)        /* doesn't start on page boundary */
1883                         return count;
1884
1885                 count++;
1886         }
1887 }
1888
1889 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1890 {
1891         struct brw_page **ppga;
1892         int i;
1893
1894         OBD_ALLOC(ppga, sizeof(*ppga) * count);
1895         if (ppga == NULL)
1896                 return NULL;
1897
1898         for (i = 0; i < count; i++)
1899                 ppga[i] = pga + i;
1900         return ppga;
1901 }
1902
1903 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1904 {
1905         LASSERT(ppga != NULL);
1906         OBD_FREE(ppga, sizeof(*ppga) * count);
1907 }
1908
1909 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1910                    obd_count page_count, struct brw_page *pga,
1911                    struct obd_trans_info *oti)
1912 {
1913         struct obdo *saved_oa = NULL;
1914         struct brw_page **ppga, **orig;
1915         struct obd_import *imp = class_exp2cliimp(exp);
1916         struct client_obd *cli;
1917         int rc, page_count_orig;
1918         ENTRY;
1919
1920         LASSERT((imp != NULL) && (imp->imp_obd != NULL));
1921         cli = &imp->imp_obd->u.cli;
1922
1923         if (cmd & OBD_BRW_CHECK) {
1924                 /* The caller just wants to know if there's a chance that this
1925                  * I/O can succeed */
1926
1927                 if (imp->imp_invalid)
1928                         RETURN(-EIO);
1929                 RETURN(0);
1930         }
1931
1932         /* test_brw with a failed create can trip this, maybe others. */
1933         LASSERT(cli->cl_max_pages_per_rpc);
1934
1935         rc = 0;
1936
1937         orig = ppga = osc_build_ppga(pga, page_count);
1938         if (ppga == NULL)
1939                 RETURN(-ENOMEM);
1940         page_count_orig = page_count;
1941
1942         sort_brw_pages(ppga, page_count);
1943         while (page_count) {
1944                 obd_count pages_per_brw;
1945
1946                 if (page_count > cli->cl_max_pages_per_rpc)
1947                         pages_per_brw = cli->cl_max_pages_per_rpc;
1948                 else
1949                         pages_per_brw = page_count;
1950
1951                 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1952
1953                 if (saved_oa != NULL) {
1954                         /* restore previously saved oa */
1955                         *oinfo->oi_oa = *saved_oa;
1956                 } else if (page_count > pages_per_brw) {
1957                         /* save a copy of oa (brw will clobber it) */
1958                         OBDO_ALLOC(saved_oa);
1959                         if (saved_oa == NULL)
1960                                 GOTO(out, rc = -ENOMEM);
1961                         *saved_oa = *oinfo->oi_oa;
1962                 }
1963
1964                 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1965                                       pages_per_brw, ppga, oinfo->oi_capa);
1966
1967                 if (rc != 0)
1968                         break;
1969
1970                 page_count -= pages_per_brw;
1971                 ppga += pages_per_brw;
1972         }
1973
1974 out:
1975         osc_release_ppga(orig, page_count_orig);
1976
1977         if (saved_oa != NULL)
1978                 OBDO_FREE(saved_oa);
1979
1980         RETURN(rc);
1981 }
1982
1983 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1984  * the dirty accounting: writeback completed, or a truncate happened before
1985  * writing started.  Must be called with the loi lock held. */
1986 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1987                            int sent)
1988 {
1989         osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1990 }
1991
1992
1993 /* This checks the lists of pending pages to read/write for a given object
1994  * (lop).  It is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
1995  * to quickly find objects that are ready to send an RPC. */
1996 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1997                          int cmd)
1998 {
1999         ENTRY;
2000
2001         if (lop->lop_num_pending == 0)
2002                 RETURN(0);
2003
2004         /* If we have an invalid import we want to drain the queued pages
2005          * by forcing them through rpcs that immediately fail and complete
2006          * the pages.  Recovery relies on this to empty the queued pages
2007          * before canceling the locks and evicting the llite pages. */
2008         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2009                 RETURN(1);
2010
2011         /* Stream rpcs in queue order as long as there is an urgent page
2012          * queued.  This is our cheap solution for good batching in the case
2013          * where writepage marks some random page in the middle of the file
2014          * as urgent because of, say, memory pressure. */
2015         if (!cfs_list_empty(&lop->lop_urgent)) {
2016                 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
2017                 RETURN(1);
2018         }
2019
2020         if (cmd & OBD_BRW_WRITE) {
2021                 /* Trigger a write rpc stream as long as there are dirtiers
2022                  * waiting for space.  While they wait, they are not going
2023                  * to create more pages to coalesce with what is waiting. */
2024                 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
2025                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
2026                         RETURN(1);
2027                 }
2028         }
2029         if (lop->lop_num_pending >= cli->cl_max_pages_per_rpc)
2030                 RETURN(1);
2031
2032         RETURN(0);
2033 }
2034
2035 static int lop_makes_hprpc(struct loi_oap_pages *lop)
2036 {
2037         struct osc_async_page *oap;
2038         ENTRY;
2039
2040         if (cfs_list_empty(&lop->lop_urgent))
2041                 RETURN(0);
2042
2043         oap = cfs_list_entry(lop->lop_urgent.next,
2044                          struct osc_async_page, oap_urgent_item);
2045
2046         if (oap->oap_async_flags & ASYNC_HP) {
2047                 CDEBUG(D_CACHE, "hp request forcing RPC\n");
2048                 RETURN(1);
2049         }
2050
2051         RETURN(0);
2052 }
2053
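/* Keep @item's membership of @list in sync with @should_be_on: add the item
 * when it should be listed but is not, remove it in the opposite case. */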
2054 static void on_list(cfs_list_t *item, cfs_list_t *list,
2055                     int should_be_on)
2056 {
2057         if (cfs_list_empty(item) && should_be_on)
2058                 cfs_list_add_tail(item, list);
2059         else if (!cfs_list_empty(item) && !should_be_on)
2060                 cfs_list_del_init(item);
2061 }
2062
2063 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
2064  * can find pages to build into rpcs quickly */
2065 void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
2066 {
2067         if (lop_makes_hprpc(&loi->loi_write_lop) ||
2068             lop_makes_hprpc(&loi->loi_read_lop)) {
2069                 /* HP rpc */
2070                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
2071                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
2072         } else {
2073                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
2074                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
2075                         lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
2076                         lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
2077         }
2078
2079         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
2080                 loi->loi_write_lop.lop_num_pending);
2081
2082         on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
2083                 loi->loi_read_lop.lop_num_pending);
2084 }
2085
2086 static void lop_update_pending(struct client_obd *cli,
2087                                struct loi_oap_pages *lop, int cmd, int delta)
2088 {
2089         lop->lop_num_pending += delta;
2090         if (cmd & OBD_BRW_WRITE)
2091                 cli->cl_pending_w_pages += delta;
2092         else
2093                 cli->cl_pending_r_pages += delta;
2094 }
2095
2096 /**
2097  * This is called when a sync waiter receives an interruption.  Its job is to
2098  * get the caller woken as soon as possible.  If its page hasn't been put in
2099  * an rpc yet, it can dequeue immediately.  Otherwise it has to mark the rpc
2100  * as desiring interruption, which will forcefully complete the rpc once the
2101  * rpc has timed out.
2102  */
2103 int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
2104 {
2105         struct loi_oap_pages *lop;
2106         struct lov_oinfo *loi;
2107         int rc = -EBUSY;
2108         ENTRY;
2109
2110         LASSERT(!oap->oap_interrupted);
2111         oap->oap_interrupted = 1;
2112
2113         /* ok, it's been put in an rpc. only one oap gets a request reference */
2114         if (oap->oap_request != NULL) {
2115                 ptlrpc_mark_interrupted(oap->oap_request);
2116                 ptlrpcd_wake(oap->oap_request);
2117                 ptlrpc_req_finished(oap->oap_request);
2118                 oap->oap_request = NULL;
2119         }
2120
2121         /*
2122          * Page completion may be called only if the ->cpo_prep() method was
2123          * executed by osc_io_submit(), which also adds the page to the
2124          * pending list. */
2125         if (!cfs_list_empty(&oap->oap_pending_item)) {
2126                 cfs_list_del_init(&oap->oap_pending_item);
2127                 cfs_list_del_init(&oap->oap_urgent_item);
2128
2129                 loi = oap->oap_loi;
2130                 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
2131                         &loi->loi_write_lop : &loi->loi_read_lop;
2132                 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
2133                 loi_list_maint(oap->oap_cli, oap->oap_loi);
2134                 rc = oap->oap_caller_ops->ap_completion(env,
2135                                           oap->oap_caller_data,
2136                                           oap->oap_cmd, NULL, -EINTR);
2137         }
2138
2139         RETURN(rc);
2140 }
2141
2142 /* This tries to propagate async writeback errors back up to the
2143  * application.  When an async write fails we record the error code, to be
2144  * returned later if the app does an fsync.  As long as errors persist we
2145  * force future rpcs to be sync, so that the app can get a sync error and
2146  * break the cycle of queueing pages for which writeback will fail. */
2147 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
2148                            int rc)
2149 {
2150         if (rc) {
2151                 if (!ar->ar_rc)
2152                         ar->ar_rc = rc;
2153
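                /* Force sync rpcs until a request sent after this failure
                 * (i.e. with xid >= ar_min_xid) completes successfully. */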
2154                 ar->ar_force_sync = 1;
2155                 ar->ar_min_xid = ptlrpc_sample_next_xid();
2156                 return;
2158         }
2159
2160         if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
2161                 ar->ar_force_sync = 0;
2162 }
2163
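/* Queue @oap on its object's pending list.  HP pages jump to the head of the
 * urgent list, merely urgent pages go to its tail. */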
2164 void osc_oap_to_pending(struct osc_async_page *oap)
2165 {
2166         struct loi_oap_pages *lop;
2167
2168         if (oap->oap_cmd & OBD_BRW_WRITE)
2169                 lop = &oap->oap_loi->loi_write_lop;
2170         else
2171                 lop = &oap->oap_loi->loi_read_lop;
2172
2173         if (oap->oap_async_flags & ASYNC_HP)
2174                 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2175         else if (oap->oap_async_flags & ASYNC_URGENT)
2176                 cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
2177         cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2178         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
2179 }
2180
2181 /* This must be called holding the loi list lock, which protects exit_cache,
2182  * async_flag maintenance, and oap_request. */
2183 static void osc_ap_completion(const struct lu_env *env,
2184                               struct client_obd *cli, struct obdo *oa,
2185                               struct osc_async_page *oap, int sent, int rc)
2186 {
2187         __u64 xid = 0;
2188
2189         ENTRY;
2190         if (oap->oap_request != NULL) {
2191                 xid = ptlrpc_req_xid(oap->oap_request);
2192                 ptlrpc_req_finished(oap->oap_request);
2193                 oap->oap_request = NULL;
2194         }
2195
2196         cfs_spin_lock(&oap->oap_lock);
2197         oap->oap_async_flags = 0;
2198         cfs_spin_unlock(&oap->oap_lock);
2199         oap->oap_interrupted = 0;
2200
2201         if (oap->oap_cmd & OBD_BRW_WRITE) {
2202                 osc_process_ar(&cli->cl_ar, xid, rc);
2203                 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
2204         }
2205
2206         if (rc == 0 && oa != NULL) {
2207                 if (oa->o_valid & OBD_MD_FLBLOCKS)
2208                         oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2209                 if (oa->o_valid & OBD_MD_FLMTIME)
2210                         oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2211                 if (oa->o_valid & OBD_MD_FLATIME)
2212                         oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2213                 if (oa->o_valid & OBD_MD_FLCTIME)
2214                         oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2215         }
2216
2217         rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
2218                                                 oap->oap_cmd, oa, rc);
2219
2220         /* cl_page_completion() drops PG_locked, so a new I/O on the page
2221          * could start; but OSC calls it under the list lock, so we can
2222          * safely add the oap back to the pending list. */
2223         if (rc)
2224                 /* upper layer wants to leave the page on pending queue */
2225                 osc_oap_to_pending(oap);
2226         else
2227                 osc_exit_cache(cli, oap, sent);
2228         EXIT;
2229 }
2230
2231 static int brw_queue_work(const struct lu_env *env, void *data)
2232 {
2233         struct client_obd *cli = data;
2234         ENTRY;
2235         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
2236
2237         client_obd_list_lock(&cli->cl_loi_list_lock);
2238         osc_check_rpcs0(env, cli, 1);
2239         client_obd_list_unlock(&cli->cl_loi_list_lock);
2240         RETURN(0);
2241 }
2242
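/* Completion callback for async BRW rpcs: finish the request, redo it on a
 * recoverable error, then complete the attached pages, wake any cache
 * waiters and poke the rpc engine. */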
2243 static int brw_interpret(const struct lu_env *env,
2244                          struct ptlrpc_request *req, void *data, int rc)
2245 {
2246         struct osc_brw_async_args *aa = data;
2247         struct client_obd *cli;
2248         int async;
2249         ENTRY;
2250
2251         rc = osc_brw_fini_request(req, rc);
2252         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2253         /* When the server returns -EINPROGRESS, the client should always
2254          * retry, regardless of how many times the bulk has been resent. */
2255         if (osc_recoverable_error(rc)) {
2256                 if (req->rq_import_generation !=
2257                     req->rq_import->imp_generation) {
2258                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2259                                ""LPU64":"LPU64", rc = %d.\n",
2260                                req->rq_import->imp_obd->obd_name,
2261                                aa->aa_oa->o_id, aa->aa_oa->o_seq, rc);
2262                 } else if (rc == -EINPROGRESS ||
2263                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2264                         rc = osc_brw_redo_request(req, aa);
2265                 } else {
2266                         CERROR("%s: too many resend retries for object: "
2267                                ""LPU64":"LPU64", rc = %d.\n",
2268                                req->rq_import->imp_obd->obd_name,
2269                                aa->aa_oa->o_id, aa->aa_oa->o_seq, rc);
2270                 }
2271
2272                 if (rc == 0)
2273                         RETURN(0);
2274                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2275                         rc = -EIO;
2276         }
2277
2278         if (aa->aa_ocapa) {
2279                 capa_put(aa->aa_ocapa);
2280                 aa->aa_ocapa = NULL;
2281         }
2282
2283         cli = aa->aa_cli;
2284         client_obd_list_lock(&cli->cl_loi_list_lock);
2285
2286         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2287          * is called so we know whether to go to sync BRWs or wait for more
2288          * RPCs to complete */
2289         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2290                 cli->cl_w_in_flight--;
2291         else
2292                 cli->cl_r_in_flight--;
2293
2294         async = cfs_list_empty(&aa->aa_oaps);
2295         if (!async) { /* from osc_send_oap_rpc() */
2296                 struct osc_async_page *oap, *tmp;
2297                 /* the caller may re-use the oap after the completion call so
2298                  * we need to clean it up a little */
2299                 cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
2300                                              oap_rpc_item) {
2301                         cfs_list_del_init(&oap->oap_rpc_item);
2302                         osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
2303                 }
2304                 OBDO_FREE(aa->aa_oa);
2305         } else { /* from async_internal() */
2306                 obd_count i;
2307                 for (i = 0; i < aa->aa_page_count; i++)
2308                         osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2309         }
2310         osc_wake_cache_waiters(cli);
2311         osc_check_rpcs0(env, cli, 1);
2312         client_obd_list_unlock(&cli->cl_loi_list_lock);
2313
2314         if (!async)
2315                 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
2316                                   req->rq_bulk->bd_nob_transferred);
2317         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2318         ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
2319
2320         RETURN(rc);
2321 }
2322
2323 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
2324                                             struct client_obd *cli,
2325                                             cfs_list_t *rpc_list,
2326                                             int page_count, int cmd)
2327 {
2328         struct ptlrpc_request *req;
2329         struct brw_page **pga = NULL;
2330         struct osc_brw_async_args *aa;
2331         struct obdo *oa = NULL;
2332         const struct obd_async_page_ops *ops = NULL;
2333         struct osc_async_page *oap;
2334         struct osc_async_page *tmp;
2335         struct cl_req *clerq = NULL;
2336         enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2337         struct ldlm_lock *lock = NULL;
2338         struct cl_req_attr crattr;
2339         int i, rc, mpflag = 0;
2340
2341         ENTRY;
2342         LASSERT(!cfs_list_empty(rpc_list));
2343
2344         if (cmd & OBD_BRW_MEMALLOC)
2345                 mpflag = cfs_memory_pressure_get_and_set();
2346
2347         memset(&crattr, 0, sizeof crattr);
2348         OBD_ALLOC(pga, sizeof(*pga) * page_count);
2349         if (pga == NULL)
2350                 GOTO(out, req = ERR_PTR(-ENOMEM));
2351
2352         OBDO_ALLOC(oa);
2353         if (oa == NULL)
2354                 GOTO(out, req = ERR_PTR(-ENOMEM));
2355
2356         i = 0;
2357         cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2358                 struct cl_page *page = osc_oap2cl_page(oap);
2359                 if (ops == NULL) {
2360                         ops = oap->oap_caller_ops;
2361
2362                         clerq = cl_req_alloc(env, page, crt,
2363                                              1 /* only 1-object rpcs for
2364                                                 * now */);
2365                         if (IS_ERR(clerq))
2366                                 GOTO(out, req = (void *)clerq);
2367                         lock = oap->oap_ldlm_lock;
2368                 }
2369                 pga[i] = &oap->oap_brw_page;
2370                 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2371                 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2372                        pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2373                 i++;
2374                 cl_req_page_add(env, clerq, page);
2375         }
2376
2377         /* always get the data for the obdo for the rpc */
2378         LASSERT(ops != NULL);
2379         crattr.cra_oa = oa;
2380         crattr.cra_capa = NULL;
2381         cl_req_attr_set(env, clerq, &crattr, ~0ULL);
2382         if (lock) {
2383                 oa->o_handle = lock->l_remote_handle;
2384                 oa->o_valid |= OBD_MD_FLHANDLE;
2385         }
2386
2387         rc = cl_req_prep(env, clerq);
2388         if (rc != 0) {
2389                 CERROR("cl_req_prep failed: %d\n", rc);
2390                 GOTO(out, req = ERR_PTR(rc));
2391         }
2392
2393         sort_brw_pages(pga, page_count);
2394         rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2395                                   pga, &req, crattr.cra_capa, 1, 0);
2396         if (rc != 0) {
2397                 CERROR("prep_req failed: %d\n", rc);
2398                 GOTO(out, req = ERR_PTR(rc));
2399         }
2400
2401         if (cmd & OBD_BRW_MEMALLOC)
2402                 req->rq_memalloc = 1;
2403
2404         /* The timestamps must be updated after the request is built in case
2405          * we race with setattr (locally or queued at the OST).  If the OST
2406          * gets the later setattr before the earlier BRW (as determined by the
2407          * request xid), it will not use the BRW timestamps.  Sadly, there is
2408          * no obvious way to do this in a single call.  bug 10150 */
2409         cl_req_attr_set(env, clerq, &crattr,
2410                         OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2411
2412         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2413         aa = ptlrpc_req_async_args(req);
2414         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2415         cfs_list_splice(rpc_list, &aa->aa_oaps);
2416         CFS_INIT_LIST_HEAD(rpc_list);
2417         aa->aa_clerq = clerq;
2418 out:
2419         if (cmd & OBD_BRW_MEMALLOC)
2420                 cfs_memory_pressure_restore(mpflag);
2421
2422         capa_put(crattr.cra_capa);
2423         if (IS_ERR(req)) {
2424                 if (oa)
2425                         OBDO_FREE(oa);
2426                 if (pga)
2427                         OBD_FREE(pga, sizeof(*pga) * page_count);
2428                 /* This should happen rarely and is pretty bad: it makes the
2429                  * pending list no longer follow the dirty order. */
2430                 client_obd_list_lock(&cli->cl_loi_list_lock);
2431                 cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
2432                         cfs_list_del_init(&oap->oap_rpc_item);
2433
2434                         /* Queued sync pages can be torn down while the pages
2435                          * are between the pending list and the rpc. */
2436                         if (oap->oap_interrupted) {
2437                                 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2438                                 osc_ap_completion(env, cli, NULL, oap, 0,
2439                                                   oap->oap_count);
2440                                 continue;
2441                         }
2442                         osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
2443                 }
2444                 if (clerq && !IS_ERR(clerq))
2445                         cl_req_completion(env, clerq, PTR_ERR(req));
2446         }
2447         RETURN(req);
2448 }
2449
2450 /**
2451  * Prepare pages for ASYNC I/O and put them in the send queue.
2452  *
2453  * \param cmd OBD_BRW_* macros
2454  * \param lop pending pages
2455  *
2456  * \return zero if no pages were added to the send queue.
2457  * \return 1 if pages were successfully added to the send queue.
2458  * \return negative on error.
2459  */
2460 static int
2461 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
2462                  struct lov_oinfo *loi, int cmd,
2463                  struct loi_oap_pages *lop, pdl_policy_t pol)
2464 {
2465         struct ptlrpc_request *req;
2466         obd_count page_count = 0;
2467         struct osc_async_page *oap = NULL, *tmp;
2468         struct osc_brw_async_args *aa;
2469         const struct obd_async_page_ops *ops;
2470         CFS_LIST_HEAD(rpc_list);
2471         int srvlock = 0, mem_tight = 0;
2472         struct cl_object *clob = NULL;
2473         obd_off starting_offset = OBD_OBJECT_EOF;
2474         unsigned int ending_offset;
2475         int starting_page_off = 0;
2476         ENTRY;
2477
2478         /* ASYNC_HP pages first.  At present, when the lock covering the
2479          * pages is about to be canceled, the pages under that lock are
2480          * marked ASYNC_HP and must be sent out as soon as possible. */
2481         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
2482                 if (oap->oap_async_flags & ASYNC_HP)
2483                         cfs_list_move(&oap->oap_pending_item, &rpc_list);
2484                 else if (!(oap->oap_brw_flags & OBD_BRW_SYNC))
2485                         /* only do this for writeback pages. */
2486                         cfs_list_move_tail(&oap->oap_pending_item, &rpc_list);
2487                 if (++page_count >= cli->cl_max_pages_per_rpc)
2488                         break;
2489         }
2490         cfs_list_splice_init(&rpc_list, &lop->lop_pending);
2491         page_count = 0;
2492
2493         /* first we find the pages we're allowed to work with */
2494         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2495                                      oap_pending_item) {
2496                 ops = oap->oap_caller_ops;
2497
2498                 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
2499                          "magic 0x%x\n", oap, oap->oap_magic);
2500
2501                 if (clob == NULL) {
2502                         /* pin object in memory, so that completion call-backs
2503                          * can be safely called under client_obd_list lock. */
2504                         clob = osc_oap2cl_page(oap)->cp_obj;
2505                         cl_object_get(clob);
2506                 }
2507
2508                 if (page_count != 0 &&
2509                     srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2510                         CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2511                                " oap %p, page %p, srvlock %u\n",
2512                                oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2513                         break;
2514                 }
2515
2516                 /* If there is a gap at the start of this page, it can't merge
2517                  * with any previous page, so we'll hand the network a
2518                  * "fragmented" page array that it can't transfer in 1 RDMA */
2519                 if (oap->oap_obj_off < starting_offset) {
2520                         if (starting_page_off != 0)
2521                                 break;
2522
2523                         starting_page_off = oap->oap_page_off;
2524                         starting_offset = oap->oap_obj_off + starting_page_off;
2525                 } else if (oap->oap_page_off != 0)
2526                         break;
2527
2528                 /* In llite, being 'ready' equates to the page being locked
2529                  * until completion unlocks it.  commit_write submits a page
2530                  * as not ready because its unlock happens unconditionally
2531                  * as the call returns.  If we race with commit_write giving
2532                  * us that page, we don't want to create a hole in the page
2533                  * stream, so we stop and leave the rpc to be fired by
2534                  * another dirtier or a kupdated interval (the not-ready page
2535                  * will still be on the dirty list).  We could call in at
2536                  * the end of ll_file_write to process the queue again. */
2537                 if (!(oap->oap_async_flags & ASYNC_READY)) {
2538                         int rc = ops->ap_make_ready(env, oap->oap_caller_data,
2539                                                     cmd);
2540                         if (rc < 0)
2541                                 CDEBUG(D_INODE, "oap %p page %p returned %d "
2542                                                 "instead of ready\n", oap,
2543                                                 oap->oap_page, rc);
2544                         switch (rc) {
2545                         case -EAGAIN:
2546                                 /* llite is telling us that the page is still
2547                                  * in commit_write and that we should try to
2548                                  * put it in an rpc again later.  We break
2549                                  * out of the loop so we don't create a hole
2550                                  * in the sequence of pages in the rpc
2551                                  * stream. */
2552                                 oap = NULL;
2553                                 break;
2554                         case -EINTR:
2555                                 /* The io isn't needed; tell the checks
2556                                  * below to complete the rpc with -EINTR. */
2557                                 cfs_spin_lock(&oap->oap_lock);
2558                                 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2559                                 cfs_spin_unlock(&oap->oap_lock);
2560                                 oap->oap_count = -EINTR;
2561                                 break;
2562                         case 0:
2563                                 cfs_spin_lock(&oap->oap_lock);
2564                                 oap->oap_async_flags |= ASYNC_READY;
2565                                 cfs_spin_unlock(&oap->oap_lock);
2566                                 break;
2567                         default:
2568                                 LASSERTF(0, "oap %p page %p returned %d "
2569                                             "from make_ready\n", oap,
2570                                             oap->oap_page, rc);
2571                                 break;
2572                         }
2573                 }
2574                 if (oap == NULL)
2575                         break;
2576
2577                 /* take the page out of our book-keeping */
2578                 cfs_list_del_init(&oap->oap_pending_item);
2579                 lop_update_pending(cli, lop, cmd, -1);
2580                 cfs_list_del_init(&oap->oap_urgent_item);
2581
2582                 /* ask the caller for the size of the io as the rpc leaves. */
2583                 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
2584                         oap->oap_count =
2585                                 ops->ap_refresh_count(env, oap->oap_caller_data,
2586                                                       cmd);
2587                         LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
2588                 }
2589                 if (oap->oap_count <= 0) {
2590                         CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2591                                oap->oap_count);
2592                         osc_ap_completion(env, cli, NULL,
2593                                           oap, 0, oap->oap_count);
2594                         continue;
2595                 }
2596
2597                 /* now put the page back in our accounting */
2598                 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
2599                 if (page_count++ == 0)
2600                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2601
2602                 if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
2603                         mem_tight = 1;
2604
2605                 /* End on a PTLRPC_MAX_BRW_SIZE boundary.  We want full-sized
2606                  * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2607                  * have the same alignment as the initial writes that allocated
2608                  * extents on the server. */
2609                 ending_offset = oap->oap_obj_off + oap->oap_page_off +
2610                                 oap->oap_count;
2611                 if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
2612                         break;
2613
2614                 if (page_count >= cli->cl_max_pages_per_rpc)
2615                         break;
2616
2617                 /* If there is a gap at the end of this page, it can't merge
2618                  * with any subsequent pages, so we'll hand the network a
2619                  * "fragmented" page array that it can't transfer in 1 RDMA */
2620                 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2621                         break;
2622         }
2623
2624         loi_list_maint(cli, loi);
2625
2626         client_obd_list_unlock(&cli->cl_loi_list_lock);
2627
2628         if (clob != NULL)
2629                 cl_object_put(env, clob);
2630
2631         if (page_count == 0) {
2632                 client_obd_list_lock(&cli->cl_loi_list_lock);
2633                 RETURN(0);
2634         }
2635
2636         req = osc_build_req(env, cli, &rpc_list, page_count,
2637                             mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
2638         if (IS_ERR(req)) {
2639                 LASSERT(cfs_list_empty(&rpc_list));
2640                 loi_list_maint(cli, loi);
2641                 RETURN(PTR_ERR(req));
2642         }
2643
2644         aa = ptlrpc_req_async_args(req);
2645
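        /* Only the offset within a PTLRPC_MAX_BRW_SIZE chunk matters for the
         * offset histograms below. */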
2646         starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
2647         if (cmd == OBD_BRW_READ) {
2648                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2649                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2650                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2651                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2652         } else {
2653                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2654                 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2655                                  cli->cl_w_in_flight);
2656                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2657                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2658         }
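        /* The tallies above record, per RPC, the page count, the number of
         * RPCs already in flight, and the starting page offset within a
         * PTLRPC_MAX_BRW_SIZE chunk; the "+ 1" keeps a zero offset out of
         * log2(0).  E.g. assuming 4KB pages, a start at byte 0x40000 of the
         * chunk is tallied as page offset 64 + 1 = 65. */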
2659
2660         client_obd_list_lock(&cli->cl_loi_list_lock);
2661
2662         if (cmd == OBD_BRW_READ)
2663                 cli->cl_r_in_flight++;
2664         else
2665                 cli->cl_w_in_flight++;
2666
2667         /* queued sync pages can be torn down while the pages
2668          * are between the pending list and the rpc */
2669         tmp = NULL;
2670         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2671                 /* only one oap gets a request reference */
2672                 if (tmp == NULL)
2673                         tmp = oap;
2674                 if (oap->oap_interrupted && !req->rq_intr) {
2675                         CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2676                                oap, req);
2677                         ptlrpc_mark_interrupted(req);
2678                 }
2679         }
2680         if (tmp != NULL)
2681                 tmp->oap_request = ptlrpc_request_addref(req);
2682
2683         DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2684                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2685
2686         req->rq_interpret_reply = brw_interpret;
2687
2688         /* XXX: Maybe the caller can check the RPC bulk descriptor to see which
2689          *      CPU/NUMA node the majority of pages were allocated on, and try
2690          *      to assign the async RPC to the CPU core (PDL_POLICY_PREFERRED)
2691          *      to reduce cross-CPU memory traffic.
2692          *
2693          *      But on the other hand, we expect that multiple ptlrpcd threads
2694          *      and the initial write sponsor can run in parallel, especially
2695          *      when data checksumming is enabled, which is a CPU-bound operation
2696          *      that a single ptlrpcd thread cannot process in time. So having more
2697          *      ptlrpcd threads share the BRW load (with PDL_POLICY_ROUND) seems better.
2698          */
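        /* Here "pol" comes from osc_check_rpcs0() below, which picks
         * PDL_POLICY_SAME when running inside a ptlrpcd thread and
         * PDL_POLICY_ROUND otherwise; the last argument is an index hint
         * for policies that use one, with -1 meaning no preference. */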
2699         ptlrpcd_add_req(req, pol, -1);
2700         RETURN(1);
2701 }
2702
2703 #define LOI_DEBUG(LOI, STR, args...)                                     \
2704         CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,           \
2705                !cfs_list_empty(&(LOI)->loi_ready_item) ||                \
2706                !cfs_list_empty(&(LOI)->loi_hp_ready_item),               \
2707                (LOI)->loi_write_lop.lop_num_pending,                     \
2708                !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent),        \
2709                (LOI)->loi_read_lop.lop_num_pending,                      \
2710                !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent),         \
2711                args)
2712
2713 /* This is called by osc_check_rpcs() to find which objects have pages that
2714  * we could be sending.  These lists are maintained by lop_makes_rpc(). */
2715 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2716 {
2717         ENTRY;
2718
2719         /* First return objects that have blocked locks so that they
2720          * will be flushed quickly and other clients can get the lock,
2721          * then objects which have pages ready to be stuffed into RPCs */
2722         if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
2723                 RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
2724                                       struct lov_oinfo, loi_hp_ready_item));
2725         if (!cfs_list_empty(&cli->cl_loi_ready_list))
2726                 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
2727                                       struct lov_oinfo, loi_ready_item));
2728
2729         /* then if we have cache waiters, return all objects with queued
2730          * writes.  This is especially important when many small files
2731          * have filled up the cache and not been fired into rpcs because
2732                  * they don't pass the nr_pending/object threshold */
2733         if (!cfs_list_empty(&cli->cl_cache_waiters) &&
2734             !cfs_list_empty(&cli->cl_loi_write_list))
2735                 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2736                                       struct lov_oinfo, loi_write_item));
2737
2738         /* then return all queued objects when we have an invalid import
2739          * so that they get flushed */
2740         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2741                 if (!cfs_list_empty(&cli->cl_loi_write_list))
2742                         RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2743                                               struct lov_oinfo,
2744                                               loi_write_item));
2745                 if (!cfs_list_empty(&cli->cl_loi_read_list))
2746                         RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
2747                                               struct lov_oinfo, loi_read_item));
2748         }
2749         RETURN(NULL);
2750 }
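/* In short, the selection order above is: objects with blocked (hp) locks,
 * then ready objects, then -- if someone is waiting for cache space -- any
 * object with queued writes, and finally, on an invalid import, everything
 * else so it can be flushed. */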
2751
2752 static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
2753 {
2754         struct osc_async_page *oap;
2755         int hprpc = 0;
2756
2757         if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
2758                 oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
2759                                      struct osc_async_page, oap_urgent_item);
2760                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2761         }
2762
2763         if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
2764                 oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
2765                                      struct osc_async_page, oap_urgent_item);
2766                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2767         }
2768
2769         return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
2770 }
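/* Illustration: with cl_max_rpcs_in_flight == 8 and an ASYNC_HP page queued
 * on either lop, the effective limit becomes 9, so one high-priority RPC can
 * still be sent after the normal in-flight budget is exhausted. */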
2771
2772 /* called with the loi list lock held */
2773 static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli, int ptlrpc)
2774 {
2775         struct lov_oinfo *loi;
2776         int rc = 0, race_counter = 0;
2777         pdl_policy_t pol;
2778         ENTRY;
2779
2780         pol = ptlrpc ? PDL_POLICY_SAME : PDL_POLICY_ROUND;
2781
2782         while ((loi = osc_next_loi(cli)) != NULL) {
2783                 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2784
2785                 if (osc_max_rpc_in_flight(cli, loi))
2786                         break;
2787
2788                 /* attempt some read/write balancing by alternating between
2789                  * reads and writes on an object.  The makes_rpc checks here
2790                  * would be redundant if we were getting read/write work items
2791                  * instead of objects.  We don't want send_oap_rpc to drain a
2792                  * partially-filled read pending queue when we're given this
2793                  * object to do write io while there are cache waiters */
2794                 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2795                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
2796                                               &loi->loi_write_lop, pol);
2797                         if (rc < 0) {
2798                                 CERROR("Write request failed with %d\n", rc);
2799
2800                                 /* osc_send_oap_rpc failed, mostly because
2801                                  * of memory pressure.
2802                                  *
2803                                  * We can't break here, because if:
2804                                  *  - a page was submitted by osc_io_submit,
2805                                  *    so the page is locked;
2806                                  *  - no request is in flight; and
2807                                  *  - no subsequent request is issued;
2808                                  * then the system ends up live-locked: there
2809                                  * is no further chance to call
2810                                  * osc_io_unplug() or osc_check_rpcs().
2811                                  * pdflush can't help either, because it
2812                                  * might be blocked grabbing the page lock
2813                                  * as mentioned above.
2814                                  *
2815                                  * Anyway, continue to drain pages. */
2816                                 /* break; */
2817                         }
2818
2819                         if (rc > 0)
2820                                 race_counter = 0;
2821                         else if (rc == 0)
2822                                 race_counter++;
2823                 }
2824                 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2825                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
2826                                               &loi->loi_read_lop, pol);
2827                         if (rc < 0)
2828                                 CERROR("Read request failed with %d\n", rc);
2829
2830                         if (rc > 0)
2831                                 race_counter = 0;
2832                         else if (rc == 0)
2833                                 race_counter++;
2834                 }
2835
2836                 /* attempt some inter-object balancing by issuing rpcs
2837                  * for each object in turn */
2838                 if (!cfs_list_empty(&loi->loi_hp_ready_item))
2839                         cfs_list_del_init(&loi->loi_hp_ready_item);
2840                 if (!cfs_list_empty(&loi->loi_ready_item))
2841                         cfs_list_del_init(&loi->loi_ready_item);
2842                 if (!cfs_list_empty(&loi->loi_write_item))
2843                         cfs_list_del_init(&loi->loi_write_item);
2844                 if (!cfs_list_empty(&loi->loi_read_item))
2845                         cfs_list_del_init(&loi->loi_read_item);
2846
2847                 loi_list_maint(cli, loi);
2848
2849                 /* send_oap_rpc returns 0 when make_ready tells it to
2850                  * back off.  llite's make_ready does this when it tries
2851                  * to lock a page queued for write that is already locked.
2852                  * We want to try sending rpcs from many objects, but we
2853                  * don't want to spin failing with 0.  */
2854                 if (race_counter == 10)
2855                         break;
2856         }
2857 }
2858
2859 void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2860 {
2861         osc_check_rpcs0(env, cli, 0);
2862 }
2863
2864 /**
2865  * Non-blocking version of osc_enter_cache() that consumes grant only when it
2866  * is available.
2867  */
2868 int osc_enter_cache_try(const struct lu_env *env,
2869                         struct client_obd *cli, struct lov_oinfo *loi,
2870                         struct osc_async_page *oap, int transient)
2871 {
2872         int has_grant;
2873
2874         has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
2875         if (has_grant) {
2876                 osc_consume_write_grant(cli, &oap->oap_brw_page);
2877                 if (transient) {
2878                         cli->cl_dirty_transit += CFS_PAGE_SIZE;
2879                         cfs_atomic_inc(&obd_dirty_transit_pages);
2880                         oap->oap_brw_flags |= OBD_BRW_NOCACHE;
2881                 }
2882         }
2883         return has_grant;
2884 }
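/* Note that this runs under cl_loi_list_lock (see osc_enter_cache() below),
 * which is what makes the grant check and osc_consume_write_grant() atomic
 * with respect to other writers. */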
2885
2886 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2887  * grant or cache space. */
2888 static int osc_enter_cache(const struct lu_env *env,
2889                            struct client_obd *cli, struct lov_oinfo *loi,
2890                            struct osc_async_page *oap)
2891 {
2892         struct osc_cache_waiter ocw;
2893         struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
2894         int rc = -EDQUOT;
2895         ENTRY;
2896
2897         CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2898                "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
2899                cli->cl_dirty_max, obd_max_dirty_pages,
2900                cli->cl_lost_grant, cli->cl_avail_grant);
2901
2902         /* force the caller to try sync io.  this can jump the list
2903          * of queued writes and create a discontiguous rpc stream */
2904         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
2905             cli->cl_dirty_max < CFS_PAGE_SIZE     ||
2906             cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
2907                 RETURN(-EDQUOT);
2908
2909         /* Hopefully normal case - cache space and write credits available */
2910         if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
2911             cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
2912             osc_enter_cache_try(env, cli, loi, oap, 0))
2913                 RETURN(0);
2914
2915         /* We can get here for two reasons: too many dirty pages in cache, or
2916          * run out of grants. In both cases we should write dirty pages out.
2917          * Adding a cache waiter will trigger urgent write-out no matter what
2918          * RPC size will be.
2919          * The exit condition is: no available grant and no dirty pages cached,
2920          * which really means there is no space left on the OST. */
2921         cfs_waitq_init(&ocw.ocw_waitq);
2922         ocw.ocw_oap = oap;
2923         while (cli->cl_dirty > 0) {
2924                 cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2925                 ocw.ocw_rc = 0;
2926
2927                 loi_list_maint(cli, loi);
2928                 osc_check_rpcs(env, cli);
2929                 client_obd_list_unlock(&cli->cl_loi_list_lock);
2930
2931                 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
2932                        cli->cl_import->imp_obd->obd_name, &ocw, oap);
2933
2934                 rc = l_wait_event(ocw.ocw_waitq, cfs_list_empty(&ocw.ocw_entry), &lwi);
2935
2936                 client_obd_list_lock(&cli->cl_loi_list_lock);
2937                 cfs_list_del_init(&ocw.ocw_entry);
2938                 if (rc < 0)
2939                         break;
2940
2941                 rc = ocw.ocw_rc;
2942                 if (rc != -EDQUOT)
2943                         break;
2944         }
2945
2946         RETURN(rc);
2947 }
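/* The waiter handshake above relies on osc_wake_cache_waiters() (called,
 * e.g., from osc_teardown_async_page() below): when grant or cache space is
 * freed it is expected to remove the ocw from cl_cache_waiters, set ocw_rc,
 * and wake ocw_waitq, which is why l_wait_event() sleeps until the entry
 * leaves the list. */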
2948
2949
2950 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2951                         struct lov_oinfo *loi, cfs_page_t *page,
2952                         obd_off offset, const struct obd_async_page_ops *ops,
2953                         void *data, void **res, int nocache,
2954                         struct lustre_handle *lockh)
2955 {
2956         struct osc_async_page *oap;
2957
2958         ENTRY;
2959
2960         if (!page)
2961                 return cfs_size_round(sizeof(*oap));
2962
2963         oap = *res;
2964         oap->oap_magic = OAP_MAGIC;
2965         oap->oap_cli = &exp->exp_obd->u.cli;
2966         oap->oap_loi = loi;
2967
2968         oap->oap_caller_ops = ops;
2969         oap->oap_caller_data = data;
2970
2971         oap->oap_page = page;
2972         oap->oap_obj_off = offset;
2973         if (!client_is_remote(exp) &&
2974             cfs_capable(CFS_CAP_SYS_RESOURCE))
2975                 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2976
2977         LASSERT(!(offset & ~CFS_PAGE_MASK));
2978
2979         CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2980         CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2981         CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2982         CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2983
2984         cfs_spin_lock_init(&oap->oap_lock);
2985         CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2986         RETURN(0);
2987 }
2988
2989 int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
2990                        struct lov_stripe_md *lsm, struct lov_oinfo *loi,
2991                        struct osc_async_page *oap, int cmd, int off,
2992                        int count, obd_flag brw_flags, enum async_flags async_flags)
2993 {
2994         struct client_obd *cli = &exp->exp_obd->u.cli;
2995         int rc = 0;
2996         ENTRY;
2997
2998         if (oap->oap_magic != OAP_MAGIC)
2999                 RETURN(-EINVAL);
3000
3001         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
3002                 RETURN(-EIO);
3003
3004         if (!cfs_list_empty(&oap->oap_pending_item) ||
3005             !cfs_list_empty(&oap->oap_urgent_item) ||
3006             !cfs_list_empty(&oap->oap_rpc_item))
3007                 RETURN(-EBUSY);
3008
3009         /* check if the file's owner/group is over quota */
3010         if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
3011                 struct cl_object *obj;
3012                 struct cl_attr    attr; /* XXX put attr into thread info */
3013                 unsigned int qid[MAXQUOTAS];
3014
3015                 obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
3016
3017                 cl_object_attr_lock(obj);
3018                 rc = cl_object_attr_get(env, obj, &attr);
3019                 cl_object_attr_unlock(obj);
3020
3021                 qid[USRQUOTA] = attr.cat_uid;
3022                 qid[GRPQUOTA] = attr.cat_gid;
3023                 if (rc == 0 &&
3024                     osc_quota_chkdq(cli, qid) == NO_QUOTA)
3025                         rc = -EDQUOT;
3026                 if (rc)
3027                         RETURN(rc);
3028         }
3029
3030         if (loi == NULL)
3031                 loi = lsm->lsm_oinfo[0];
3032
3033         client_obd_list_lock(&cli->cl_loi_list_lock);
3034
3035         LASSERT(off + count <= CFS_PAGE_SIZE);
3036         oap->oap_cmd = cmd;
3037         oap->oap_page_off = off;
3038         oap->oap_count = count;
3039         oap->oap_brw_flags = brw_flags;
3040         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
3041         if (cfs_memory_pressure_get())
3042                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
3043         cfs_spin_lock(&oap->oap_lock);
3044         oap->oap_async_flags = async_flags;
3045         cfs_spin_unlock(&oap->oap_lock);
3046
3047         if (cmd & OBD_BRW_WRITE) {
3048                 rc = osc_enter_cache(env, cli, loi, oap);
3049                 if (rc) {
3050                         client_obd_list_unlock(&cli->cl_loi_list_lock);
3051                         RETURN(rc);
3052                 }
3053         }
3054
3055         LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
3056                   cmd);
3057
3058         osc_oap_to_pending(oap);
3059         loi_list_maint(cli, loi);
3060         if (!osc_max_rpc_in_flight(cli, loi) &&
3061             lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
3062                 LASSERT(cli->cl_writeback_work != NULL);
3063                 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
3064
3065                 CDEBUG(D_CACHE, "Queued writeback work for client obd %p/%d.\n",
3066                        cli, rc);
3067         }
3068         client_obd_list_unlock(&cli->cl_loi_list_lock);
3069
3070         RETURN(0);
3071 }
3072
3073 /* aka (~was & now & flag), but this is more clear :) */
3074 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
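/* E.g. SETTING(0x1, 0x3, 0x2) == 1 (the 0x2 bit is newly set), while
 * SETTING(0x3, 0x3, 0x2) == 0 (it was already set). */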
3075
3076 int osc_set_async_flags_base(struct client_obd *cli,
3077                              struct lov_oinfo *loi, struct osc_async_page *oap,
3078                              obd_flag async_flags)
3079 {
3080         struct loi_oap_pages *lop;
3081         int flags = 0;
3082         ENTRY;
3083
3084         LASSERT(!cfs_list_empty(&oap->oap_pending_item));
3085
3086         if (oap->oap_cmd & OBD_BRW_WRITE) {
3087                 lop = &loi->loi_write_lop;
3088         } else {
3089                 lop = &loi->loi_read_lop;
3090         }
3091
3092         if ((oap->oap_async_flags & async_flags) == async_flags)
3093                 RETURN(0);
3094
3095         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
3096                 flags |= ASYNC_READY;
3097
3098         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
3099             cfs_list_empty(&oap->oap_rpc_item)) {
3100                 if (oap->oap_async_flags & ASYNC_HP)
3101                         cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
3102                 else
3103                         cfs_list_add_tail(&oap->oap_urgent_item,
3104                                           &lop->lop_urgent);
3105                 flags |= ASYNC_URGENT;
3106                 loi_list_maint(cli, loi);
3107         }
3108         cfs_spin_lock(&oap->oap_lock);
3109         oap->oap_async_flags |= flags;
3110         cfs_spin_unlock(&oap->oap_lock);
3111
3112         LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
3113                         oap->oap_async_flags);
3114         RETURN(0);
3115 }
3116
3117 int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
3118                             struct lov_oinfo *loi, struct osc_async_page *oap)
3119 {
3120         struct client_obd *cli = &exp->exp_obd->u.cli;
3121         struct loi_oap_pages *lop;
3122         int rc = 0;
3123         ENTRY;
3124
3125         if (oap->oap_magic != OAP_MAGIC)
3126                 RETURN(-EINVAL);
3127
3128         if (loi == NULL)
3129                 loi = lsm->lsm_oinfo[0];
3130
3131         if (oap->oap_cmd & OBD_BRW_WRITE) {
3132                 lop = &loi->loi_write_lop;
3133         } else {
3134                 lop = &loi->loi_read_lop;
3135         }
3136
3137         client_obd_list_lock(&cli->cl_loi_list_lock);
3138
3139         if (!cfs_list_empty(&oap->oap_rpc_item))
3140                 GOTO(out, rc = -EBUSY);
3141
3142         osc_exit_cache(cli, oap, 0);
3143         osc_wake_cache_waiters(cli);
3144
3145         if (!cfs_list_empty(&oap->oap_urgent_item)) {
3146                 cfs_list_del_init(&oap->oap_urgent_item);
3147                 cfs_spin_lock(&oap->oap_lock);
3148                 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
3149                 cfs_spin_unlock(&oap->oap_lock);
3150         }
3151         if (!cfs_list_empty(&oap->oap_pending_item)) {
3152                 cfs_list_del_init(&oap->oap_pending_item);
3153                 lop_update_pending(cli, lop, oap->oap_cmd, -1);
3154         }
3155         loi_list_maint(cli, loi);
3156         LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
3157 out:
3158         client_obd_list_unlock(&cli->cl_loi_list_lock);
3159         RETURN(rc);
3160 }
3161
3162 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
3163                                         struct ldlm_enqueue_info *einfo)
3164 {
3165         void *data = einfo->ei_cbdata;
3166         int set = 0;
3167
3168         LASSERT(lock != NULL);
3169         LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
3170         LASSERT(lock->l_resource->lr_type == einfo->ei_type);
3171         LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
3172         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
3173
3174         lock_res_and_lock(lock);
3175         cfs_spin_lock(&osc_ast_guard);
3176
3177         if (lock->l_ast_data == NULL)
3178                 lock->l_ast_data = data;
3179         if (lock->l_ast_data == data)
3180                 set = 1;
3181
3182         cfs_spin_unlock(&osc_ast_guard);
3183         unlock_res_and_lock(lock);
3184
3185         return set;
3186 }
3187
3188 static int osc_set_data_with_check(struct lustre_handle *lockh,
3189                                    struct ldlm_enqueue_info *einfo)
3190 {
3191         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3192         int set = 0;
3193
3194         if (lock != NULL) {
3195                 set = osc_set_lock_data_with_check(lock, einfo);
3196                 LDLM_LOCK_PUT(lock);
3197         } else
3198                 CERROR("lockh %p, data %p - client evicted?\n",
3199                        lockh, einfo->ei_cbdata);
3200         return set;
3201 }
3202
3203 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3204                              ldlm_iterator_t replace, void *data)
3205 {
3206         struct ldlm_res_id res_id;
3207         struct obd_device *obd = class_exp2obd(exp);
3208
3209         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3210         ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3211         return 0;
3212 }
3213
3214 /* find any ldlm lock of the inode in osc
3215  * return 0    if none is found
3216  *        1    if one is found
3217  *      < 0    on error */
3218 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3219                            ldlm_iterator_t replace, void *data)
3220 {
3221         struct ldlm_res_id res_id;
3222         struct obd_device *obd = class_exp2obd(exp);
3223         int rc = 0;
3224
3225         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3226         rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3227         if (rc == LDLM_ITER_STOP)
3228                 return(1);
3229         if (rc == LDLM_ITER_CONTINUE)
3230                 return(0);
3231         return(rc);
3232 }
3233
3234 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
3235                             obd_enqueue_update_f upcall, void *cookie,
3236                             int *flags, int agl, int rc)
3237 {
3238         int intent = *flags & LDLM_FL_HAS_INTENT;
3239         ENTRY;
3240
3241         if (intent) {
3242                 /* The request was created before ldlm_cli_enqueue call. */
3243                 if (rc == ELDLM_LOCK_ABORTED) {
3244                         struct ldlm_reply *rep;
3245                         rep = req_capsule_server_get(&req->rq_pill,
3246                                                      &RMF_DLM_REP);
3247
3248                         LASSERT(rep != NULL);
3249                         if (rep->lock_policy_res1)
3250                                 rc = rep->lock_policy_res1;
3251                 }
3252         }
3253
3254         if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
3255             (rc == 0)) {
3256                 *flags |= LDLM_FL_LVB_READY;
3257                 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3258                        lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
3259         }
3260
3261         /* Call the update callback. */
3262         rc = (*upcall)(cookie, rc);
3263         RETURN(rc);
3264 }
3265
3266 static int osc_enqueue_interpret(const struct lu_env *env,
3267                                  struct ptlrpc_request *req,
3268                                  struct osc_enqueue_args *aa, int rc)
3269 {
3270         struct ldlm_lock *lock;
3271         struct lustre_handle handle;
3272         __u32 mode;
3273         struct ost_lvb *lvb;
3274         __u32 lvb_len;
3275         int *flags = aa->oa_flags;
3276
3277         /* Make a local copy of the lock handle and mode, because aa->oa_*
3278          * might be freed anytime after the lock upcall has been called. */
3279         lustre_handle_copy(&handle, aa->oa_lockh);
3280         mode = aa->oa_ei->ei_mode;
3281
3282         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3283          * be valid. */
3284         lock = ldlm_handle2lock(&handle);
3285
3286         /* Take an additional reference so that a blocking AST that
3287          * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
3288          * to arrive after an upcall has been executed by
3289          * osc_enqueue_fini(). */
3290         ldlm_lock_addref(&handle, mode);
3291
3292         /* Let the CP AST grant the lock first. */
3293         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
3294
3295         if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
3296                 lvb = NULL;
3297                 lvb_len = 0;
3298         } else {
3299                 lvb = aa->oa_lvb;
3300                 lvb_len = sizeof(*aa->oa_lvb);
3301         }
3302
3303         /* Complete obtaining the lock procedure. */
3304         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3305                                    mode, flags, lvb, lvb_len, &handle, rc);
3306         /* Complete osc stuff. */
3307         rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
3308                               flags, aa->oa_agl, rc);
3309
3310         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
3311
3312         /* Release the lock for async request. */
3313         if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
3314                 /*
3315                  * Releases a reference taken by ldlm_cli_enqueue(), if it is
3316                  * not already released by
3317                  * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
3318                  */
3319                 ldlm_lock_decref(&handle, mode);
3320
3321         LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3322                  aa->oa_lockh, req, aa);
3323         ldlm_lock_decref(&handle, mode);
3324         LDLM_LOCK_PUT(lock);
3325         return rc;
3326 }
3327
3328 void osc_update_enqueue(struct lustre_handle *lov_lockhp,
3329                         struct lov_oinfo *loi, int flags,
3330                         struct ost_lvb *lvb, __u32 mode, int rc)
3331 {
3332         struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
3333
3334         if (rc == ELDLM_OK) {
3335                 __u64 tmp;
3336
3337                 LASSERT(lock != NULL);
3338                 loi->loi_lvb = *lvb;
3339                 tmp = loi->loi_lvb.lvb_size;
3340                 /* Extend KMS up to the end of this lock and no further
3341                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
3342                 if (tmp > lock->l_policy_data.l_extent.end)
3343                         tmp = lock->l_policy_data.l_extent.end + 1;
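                        /* e.g. lvb_size == 10MB but the lock covers only
                         * [0, 0xfffff]: tmp is capped at 0x100000, so kms
                         * can be raised to at most 1MB here. */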
3344                 if (tmp >= loi->loi_kms) {
3345                         LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
3346                                    ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
3347                         loi_kms_set(loi, tmp);
3348                 } else {
3349                         LDLM_DEBUG(lock, "lock acquired, setting rss="
3350                                    LPU64"; leaving kms="LPU64", end="LPU64,
3351                                    loi->loi_lvb.lvb_size, loi->loi_kms,
3352                                    lock->l_policy_data.l_extent.end);
3353                 }
3354                 ldlm_lock_allow_match(lock);
3355         } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
3356                 LASSERT(lock != NULL);
3357                 loi->loi_lvb = *lvb;
3358                 ldlm_lock_allow_match(lock);
3359                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
3360                        " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
3361                 rc = ELDLM_OK;
3362         }
3363
3364         if (lock != NULL) {
3365                 if (rc != ELDLM_OK)
3366                         ldlm_lock_fail_match(lock);
3367
3368                 LDLM_LOCK_PUT(lock);
3369         }
3370 }
3371 EXPORT_SYMBOL(osc_update_enqueue);
3372
3373 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
3374
3375 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
3376  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
3377  * other synchronous requests; however, keeping some locks while trying to
3378  * obtain others may take a considerable amount of time in case of OST failure,
3379  * and when other sync requests cannot get a lock released from a client, that
3380  * client is excluded from the cluster -- such scenarios make life difficult,
3381  * so release locks just after they are obtained. */
3382 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3383                      int *flags, ldlm_policy_data_t *policy,
3384                      struct ost_lvb *lvb, int kms_valid,
3385                      obd_enqueue_update_f upcall, void *cookie,
3386                      struct ldlm_enqueue_info *einfo,
3387                      struct lustre_handle *lockh,
3388                      struct ptlrpc_request_set *rqset, int async, int agl)
3389 {
3390         struct obd_device *obd = exp->exp_obd;
3391         struct ptlrpc_request *req = NULL;
3392         int intent = *flags & LDLM_FL_HAS_INTENT;
3393         int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
3394         ldlm_mode_t mode;
3395         int rc;
3396         ENTRY;
3397
3398         /* Filesystem lock extents are extended to page boundaries so that
3399          * dealing with the page cache is a little smoother.  */
3400         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3401         policy->l_extent.end |= ~CFS_PAGE_MASK;
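        /* E.g. assuming 4KB pages (so ~CFS_PAGE_MASK == 0xfff): a byte range
         * [5000, 9000] is widened to [4096, 12287], i.e. whole pages. */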
3402
3403         /*
3404          * kms is not valid when either object is completely fresh (so that no
3405          * locks are cached), or object was evicted. In the latter case cached
3406          * lock cannot be used, because it would prime inode state with
3407          * potentially stale LVB.
3408          */
3409         if (!kms_valid)
3410                 goto no_match;
3411
3412         /* Next, search for already existing extent locks that will cover us */
3413         /* If we're trying to read, we also search for an existing PW lock.  The
3414          * VFS and page cache already protect us locally, so lots of readers/
3415          * writers can share a single PW lock.
3416          *
3417          * There are problems with conversion deadlocks, so instead of
3418          * converting a read lock to a write lock, we'll just enqueue a new
3419          * one.
3420          *
3421          * At some point we should cancel the read lock instead of making them
3422          * send us a blocking callback, but there are problems with canceling
3423          * locks out from other users right now, too. */
3424         mode = einfo->ei_mode;
3425         if (einfo->ei_mode == LCK_PR)
3426                 mode |= LCK_PW;
3427         mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
3428                                einfo->ei_type, policy, mode, lockh, 0);
3429         if (mode) {
3430                 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
3431
3432                 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
3433                         /* For AGL, if the enqueue RPC was sent but the lock
3434                          * was not granted, then skip processing this stripe.
3435                          * Return -ECANCELED to tell the caller. */
3436                         ldlm_lock_decref(lockh, mode);
3437                         LDLM_LOCK_PUT(matched);
3438                         RETURN(-ECANCELED);
3439                 } else if (osc_set_lock_data_with_check(matched, einfo)) {
3440                         *flags |= LDLM_FL_LVB_READY;
3441                         /* addref the lock only if not async requests and PW
3442                          * lock is matched whereas we asked for PR. */
3443                         if (!rqset && einfo->ei_mode != mode)
3444                                 ldlm_lock_addref(lockh, LCK_PR);
3445                         if (intent) {
3446                                 /* I would like to be able to ASSERT here that
3447                                  * rss <= kms, but I can't, for reasons which
3448                                  * are explained in lov_enqueue() */
3449                         }
3450
3451                         /* We already have a lock, and it's referenced */
3452                         (*upcall)(cookie, ELDLM_OK);
3453
3454                         if (einfo->ei_mode != mode)
3455                                 ldlm_lock_decref(lockh, LCK_PW);
3456                         else if (rqset)
3457                                 /* For async requests, decref the lock. */
3458                                 ldlm_lock_decref(lockh, einfo->ei_mode);
3459                         LDLM_LOCK_PUT(matched);
3460                         RETURN(ELDLM_OK);
3461                 } else {
3462                         ldlm_lock_decref(lockh, mode);
3463                         LDLM_LOCK_PUT(matched);
3464                 }
3465         }
3466
3467  no_match:
3468         if (intent) {
3469                 CFS_LIST_HEAD(cancels);
3470                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3471                                            &RQF_LDLM_ENQUEUE_LVB);
3472                 if (req == NULL)
3473                         RETURN(-ENOMEM);
3474
3475                 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3476                 if (rc) {
3477                         ptlrpc_request_free(req);
3478                         RETURN(rc);
3479                 }
3480
3481                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3482                                      sizeof *lvb);
3483                 ptlrpc_request_set_replen(req);
3484         }
3485
3486         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3487         *flags &= ~LDLM_FL_BLOCK_GRANTED;
3488
3489         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3490                               sizeof(*lvb), lockh, async);
3491         if (rqset) {
3492                 if (!rc) {
3493                         struct osc_enqueue_args *aa;
3494                         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3495                         aa = ptlrpc_req_async_args(req);
3496                         aa->oa_ei = einfo;
3497                         aa->oa_exp = exp;
3498                         aa->oa_flags  = flags;
3499                         aa->oa_upcall = upcall;
3500                         aa->oa_cookie = cookie;
3501                         aa->oa_lvb    = lvb;
3502                         aa->oa_lockh  = lockh;
3503                         aa->oa_agl    = !!agl;
3504
3505                         req->rq_interpret_reply =
3506                                 (ptlrpc_interpterer_t)osc_enqueue_interpret;
3507                         if (rqset == PTLRPCD_SET)
3508                                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3509                         else
3510                                 ptlrpc_set_add_req(rqset, req);
3511                 } else if (intent) {
3512                         ptlrpc_req_finished(req);
3513                 }
3514                 RETURN(rc);
3515         }
3516
3517         rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
3518         if (intent)
3519                 ptlrpc_req_finished(req);
3520
3521         RETURN(rc);
3522 }
3523
3524 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3525                        struct ldlm_enqueue_info *einfo,
3526                        struct ptlrpc_request_set *rqset)
3527 {
3528         struct ldlm_res_id res_id;
3529         int rc;
3530         ENTRY;
3531
3532         osc_build_res_name(oinfo->oi_md->lsm_object_id,
3533                            oinfo->oi_md->lsm_object_seq, &res_id);
3534
3535         rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
3536                               &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3537                               oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
3538                               oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
3539                               rqset, rqset != NULL, 0);
3540         RETURN(rc);
3541 }
3542
3543 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3544                    __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3545                    int *flags, void *data, struct lustre_handle *lockh,
3546                    int unref)
3547 {
3548         struct obd_device *obd = exp->exp_obd;
3549         int lflags = *flags;
3550         ldlm_mode_t rc;
3551         ENTRY;
3552
3553         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3554                 RETURN(-EIO);
3555
3556         /* Filesystem lock extents are extended to page boundaries so that
3557          * dealing with the page cache is a little smoother */
3558         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3559         policy->l_extent.end |= ~CFS_PAGE_MASK;
3560
3561         /* Next, search for already existing extent locks that will cover us */
3562         /* If we're trying to read, we also search for an existing PW lock.  The
3563          * VFS and page cache already protect us locally, so lots of readers/
3564          * writers can share a single PW lock. */
3565         rc = mode;
3566         if (mode == LCK_PR)
3567                 rc |= LCK_PW;
3568         rc = ldlm_lock_match(obd->obd_namespace, lflags,
3569                              res_id, type, policy, rc, lockh, unref);
3570         if (rc) {
3571                 if (data != NULL) {
3572                         if (!osc_set_data_with_check(lockh, data)) {
3573                                 if (!(lflags & LDLM_FL_TEST_LOCK))
3574                                         ldlm_lock_decref(lockh, rc);
3575                                 RETURN(0);
3576                         }
3577                 }
3578                 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3579                         ldlm_lock_addref(lockh, LCK_PR);
3580                         ldlm_lock_decref(lockh, LCK_PW);
3581                 }
3582                 RETURN(rc);
3583         }
3584         RETURN(rc);
3585 }
3586
3587 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
3588 {
3589         ENTRY;
3590
3591         if (unlikely(mode == LCK_GROUP))
3592                 ldlm_lock_decref_and_cancel(lockh, mode);
3593         else
3594                 ldlm_lock_decref(lockh, mode);
3595
3596         RETURN(0);
3597 }
3598
3599 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3600                       __u32 mode, struct lustre_handle *lockh)
3601 {
3602         ENTRY;
3603         RETURN(osc_cancel_base(lockh, mode));
3604 }
3605
3606 static int osc_cancel_unused(struct obd_export *exp,
3607                              struct lov_stripe_md *lsm,
3608                              ldlm_cancel_flags_t flags,
3609                              void *opaque)
3610 {
3611         struct obd_device *obd = class_exp2obd(exp);
3612         struct ldlm_res_id res_id, *resp = NULL;
3613
3614         if (lsm != NULL) {
3615                 resp = osc_build_res_name(lsm->lsm_object_id,
3616                                           lsm->lsm_object_seq, &res_id);
3617         }
3618
3619         return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3620 }
3621
3622 static int osc_statfs_interpret(const struct lu_env *env,
3623                                 struct ptlrpc_request *req,
3624                                 struct osc_async_args *aa, int rc)
3625 {
3626         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
3627         struct obd_statfs *msfs;
3628         __u64 used;
3629         ENTRY;
3630
3631         if (rc == -EBADR)
3632                 /* The request has in fact never been sent
3633                  * due to issues at a higher level (LOV).
3634                  * Exit immediately since the caller is
3635                  * aware of the problem and takes care
3636                  * of the clean up */
3637                  RETURN(rc);
3638
3639         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3640             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3641                 GOTO(out, rc = 0);
3642
3643         if (rc != 0)
3644                 GOTO(out, rc);
3645
3646         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3647         if (msfs == NULL) {
3648                 GOTO(out, rc = -EPROTO);
3649         }
3650
3651         /* Reinitialize the RDONLY and DEGRADED flags at the client
3652          * on each statfs, so they don't stay set permanently. */
3653         cfs_spin_lock(&cli->cl_oscc.oscc_lock);
3654
3655         if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
3656                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
3657         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
3658                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
3659
3660         if (unlikely(msfs->os_state & OS_STATE_READONLY))
3661                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
3662         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
3663                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
3664
3665         /* Add a bit of hysteresis so this flag isn't continually flapping,
3666          * and ensure that new files don't get extremely fragmented due to
3667          * only a small amount of available space in the filesystem.
3668          * We want to set the NOSPC flag when there is less than ~0.1% free
3669          * and clear it when there is at least ~0.2% free space, so:
3670          *                   avail < ~0.1% max          max = avail + used
3671          *            1025 * avail < avail + used       used = blocks - free
3672          *            1024 * avail < used
3673          *            1024 * avail < blocks - free
3674          *                   avail < ((blocks - free) >> 10)
3675          *
3676          * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want
3677          * to lose that much space, so in those cases we report no space left
3678          * if there is less than 1 GB left.                             */
3679         used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
3680         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
3681                      ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
3682                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
3683         else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3684                           (msfs->os_ffree > 64) &&
3685                           (msfs->os_bavail > (used << 1)))) {
3686                 cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
3687                                              OSCC_FLAG_NOSPC_BLK);
3688         }
3689
3690         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3691                      (msfs->os_bavail < used)))
3692                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
3693
3694         cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
3695
3696         *aa->aa_oi->oi_osfs = *msfs;
3697 out:
3698         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3699         RETURN(rc);
3700 }
3701
3702 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3703                             __u64 max_age, struct ptlrpc_request_set *rqset)
3704 {
3705         struct ptlrpc_request *req;
3706         struct osc_async_args *aa;
3707         int                    rc;
3708         ENTRY;
3709
3710         /* We could possibly pass max_age in the request (as an absolute
3711          * timestamp or a "seconds.usec ago") so the target can avoid doing
3712          * extra calls into the filesystem if that isn't necessary (e.g.
3713          * during mount that would help a bit).  Having relative timestamps
3714          * is not so great if request processing is slow, while absolute
3715          * timestamps are not ideal because they need time synchronization. */
3716         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3717         if (req == NULL)
3718                 RETURN(-ENOMEM);
3719
3720         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3721         if (rc) {
3722                 ptlrpc_request_free(req);
3723                 RETURN(rc);
3724         }
3725         ptlrpc_request_set_replen(req);
3726         req->rq_request_portal = OST_CREATE_PORTAL;
3727         ptlrpc_at_set_req_timeout(req);
3728
3729         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3730                 /* procfs requests should not wait on statfs, to avoid a deadlock */
3731                 req->rq_no_resend = 1;
3732                 req->rq_no_delay = 1;
3733         }
3734
3735         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
3736         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3737         aa = ptlrpc_req_async_args(req);
3738         aa->aa_oi = oinfo;
3739
3740         ptlrpc_set_add_req(rqset, req);
3741         RETURN(0);
3742 }
3743
3744 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3745                       __u64 max_age, __u32 flags)
3746 {
3747         struct obd_statfs     *msfs;
3748         struct ptlrpc_request *req;
3749         struct obd_import     *imp = NULL;
3750         int rc;
3751         ENTRY;
3752
3753         /* Since the request might also come from lprocfs, we need to
3754          * sync this with client_disconnect_export (bug 15684) */
3755         cfs_down_read(&obd->u.cli.cl_sem);
3756         if (obd->u.cli.cl_import)
3757                 imp = class_import_get(obd->u.cli.cl_import);
3758         cfs_up_read(&obd->u.cli.cl_sem);
3759         if (!imp)
3760                 RETURN(-ENODEV);
3761
3762         /* We could possibly pass max_age in the request (as an absolute
3763          * timestamp or a "seconds.usec ago") so the target can avoid doing
3764          * extra calls into the filesystem if that isn't necessary (e.g.
3765          * during mount that would help a bit).  Having relative timestamps
3766          * is not so great if request processing is slow, while absolute
3767          * timestamps are not ideal because they need time synchronization. */
3768         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3769
3770         class_import_put(imp);
3771
3772         if (req == NULL)
3773                 RETURN(-ENOMEM);
3774
3775         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3776         if (rc) {
3777                 ptlrpc_request_free(req);
3778                 RETURN(rc);
3779         }
3780         ptlrpc_request_set_replen(req);
3781         req->rq_request_portal = OST_CREATE_PORTAL;
3782         ptlrpc_at_set_req_timeout(req);
3783
3784         if (flags & OBD_STATFS_NODELAY) {
3785                 /* procfs requests should not wait on statfs, to avoid a deadlock */
3786                 req->rq_no_resend = 1;
3787                 req->rq_no_delay = 1;
3788         }
3789
3790         rc = ptlrpc_queue_wait(req);
3791         if (rc)
3792                 GOTO(out, rc);
3793
3794         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3795         if (msfs == NULL) {
3796                 GOTO(out, rc = -EPROTO);
3797         }
3798
3799         *osfs = *msfs;
3800
3801         EXIT;
3802  out:
3803         ptlrpc_req_finished(req);
3804         return rc;
3805 }
3806
3807 /* Retrieve object striping information.
3808  *
3809  * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3810  * the maximum number of OST indices which will fit in the user buffer.
3811  * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3812  */
3813 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3814 {
3815         /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
3816         struct lov_user_md_v3 lum, *lumk;
3817         struct lov_user_ost_data_v1 *lmm_objects;
3818         int rc = 0, lum_size;
3819         ENTRY;
3820
3821         if (!lsm)
3822                 RETURN(-ENODATA);
3823
3824         /* we only need the header part from user space to get lmm_magic and
3825          * lmm_stripe_count (the header part is common to v1 and v3) */
3826         lum_size = sizeof(struct lov_user_md_v1);
3827         if (cfs_copy_from_user(&lum, lump, lum_size))
3828                 RETURN(-EFAULT);
3829
3830         if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
3831             (lum.lmm_magic != LOV_USER_MAGIC_V3))
3832                 RETURN(-EINVAL);
3833
3834         /* lov_user_md_vX and lov_mds_md_vX must have the same size */
3835         LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
3836         LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
3837         LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
3838
3839         /* we can use lov_mds_md_size() to compute lum_size
3840          * because lov_user_md_vX and lov_mds_md_vX have the same size */
3841         if (lum.lmm_stripe_count > 0) {
3842                 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
3843                 OBD_ALLOC(lumk, lum_size);
3844                 if (!lumk)
3845                         RETURN(-ENOMEM);
3846
3847                 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
3848                         lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
3849                 else
3850                         lmm_objects = &(lumk->lmm_objects[0]);
3851                 lmm_objects->l_object_id = lsm->lsm_object_id;
3852         } else {
3853                 lum_size = lov_mds_md_size(0, lum.lmm_magic);
3854                 lumk = &lum;
3855         }
3856
3857         lumk->lmm_object_id = lsm->lsm_object_id;
3858         lumk->lmm_object_seq = lsm->lsm_object_seq;
3859         lumk->lmm_stripe_count = 1;
3860
3861         if (cfs_copy_to_user(lump, lumk, lum_size))
3862                 rc = -EFAULT;
3863
3864         if (lumk != &lum)
3865                 OBD_FREE(lumk, lum_size);
3866
3867         RETURN(rc);
3868 }
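/* A sketch of the expected caller pattern, reached through the
 * LL_IOC_LOV_GETSTRIPE case in osc_iocontrol() below (the user-space side
 * here is hypothetical and simplified):
 *
 *      struct lov_user_md_v1 lum = { .lmm_magic = LOV_USER_MAGIC_V1,
 *                                    .lmm_stripe_count = 1 };
 *      ioctl(fd, LL_IOC_LOV_GETSTRIPE, &lum);
 *
 * osc_getstripe() copies in only the common header, validates lmm_magic,
 * and copies back a single-stripe reply. */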
3869
3870
3871 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3872                          void *karg, void *uarg)
3873 {
3874         struct obd_device *obd = exp->exp_obd;
3875         struct obd_ioctl_data *data = karg;
3876         int err = 0;
3877         ENTRY;
3878
3879         if (!cfs_try_module_get(THIS_MODULE)) {
3880                 CERROR("Can't get module. Is it alive?");
3881                 return -EINVAL;
3882         }
3883         switch (cmd) {
3884         case OBD_IOC_LOV_GET_CONFIG: {
3885                 char *buf;
3886                 struct lov_desc *desc;
3887                 struct obd_uuid uuid;
3888
3889                 buf = NULL;
3890                 len = 0;
3891                 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3892                         GOTO(out, err = -EINVAL);
3893
3894                 data = (struct obd_ioctl_data *)buf;
3895
3896                 if (sizeof(*desc) > data->ioc_inllen1) {
3897                         obd_ioctl_freedata(buf, len);
3898                         GOTO(out, err = -EINVAL);
3899                 }
3900
3901                 if (data->ioc_inllen2 < sizeof(uuid)) {
3902                         obd_ioctl_freedata(buf, len);
3903                         GOTO(out, err = -EINVAL);
3904                 }
3905
3906                 desc = (struct lov_desc *)data->ioc_inlbuf1;
3907                 desc->ld_tgt_count = 1;
3908                 desc->ld_active_tgt_count = 1;
3909                 desc->ld_default_stripe_count = 1;
3910                 desc->ld_default_stripe_size = 0;
3911                 desc->ld_default_stripe_offset = 0;
3912                 desc->ld_pattern = 0;
3913                 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3914
3915                 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3916
3917                 err = cfs_copy_to_user((void *)uarg, buf, len);
3918                 if (err)
3919                         err = -EFAULT;
3920                 obd_ioctl_freedata(buf, len);
3921                 GOTO(out, err);
3922         }
3923         case LL_IOC_LOV_SETSTRIPE:
3924                 err = obd_alloc_memmd(exp, karg);
3925                 if (err > 0)
3926                         err = 0;
3927                 GOTO(out, err);
3928         case LL_IOC_LOV_GETSTRIPE:
3929                 err = osc_getstripe(karg, uarg);
3930                 GOTO(out, err);
3931         case OBD_IOC_CLIENT_RECOVER:
3932                 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3933                                             data->ioc_inlbuf1, 0);
3934                 if (err > 0)
3935                         err = 0;
3936                 GOTO(out, err);
3937         case IOC_OSC_SET_ACTIVE:
3938                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3939                                                data->ioc_offset);
3940                 GOTO(out, err);
3941         case OBD_IOC_POLL_QUOTACHECK:
3942                 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
3943                 GOTO(out, err);
3944         case OBD_IOC_PING_TARGET:
3945                 err = ptlrpc_obd_ping(obd);
3946                 GOTO(out, err);
3947         default:
3948                 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3949                        cmd, cfs_curproc_comm());
3950                 GOTO(out, err = -ENOTTY);
3951         }
3952 out:
3953         cfs_module_put(THIS_MODULE);
3954         return err;
3955 }
3956
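/**
 * Read information from the OST identified by \a key.  KEY_LOCK_TO_STRIPE
 * is answered locally (an OSC object always maps to stripe 0), while
 * KEY_LAST_ID and KEY_FIEMAP are forwarded to the OST as synchronous
 * OST_GET_INFO requests.
 */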
3957 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3958                         void *key, __u32 *vallen, void *val,
3959                         struct lov_stripe_md *lsm)
3960 {
3961         ENTRY;
3962         if (!vallen || !val)
3963                 RETURN(-EFAULT);
3964
3965         if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3966                 __u32 *stripe = val;
3967                 *vallen = sizeof(*stripe);
3968                 *stripe = 0;
3969                 RETURN(0);
3970         } else if (KEY_IS(KEY_LAST_ID)) {
3971                 struct ptlrpc_request *req;
3972                 obd_id                *reply;
3973                 char                  *tmp;
3974                 int                    rc;
3975
3976                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3977                                            &RQF_OST_GET_INFO_LAST_ID);
3978                 if (req == NULL)
3979                         RETURN(-ENOMEM);
3980
3981                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3982                                      RCL_CLIENT, keylen);
3983                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3984                 if (rc) {
3985                         ptlrpc_request_free(req);
3986                         RETURN(rc);
3987                 }
3988
3989                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3990                 memcpy(tmp, key, keylen);
3991
3992                 req->rq_no_delay = req->rq_no_resend = 1;
3993                 ptlrpc_request_set_replen(req);
3994                 rc = ptlrpc_queue_wait(req);
3995                 if (rc)
3996                         GOTO(out, rc);
3997
3998                 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3999                 if (reply == NULL)
4000                         GOTO(out, rc = -EPROTO);
4001
4002                 *((obd_id *)val) = *reply;
4003         out:
4004                 ptlrpc_req_finished(req);
4005                 RETURN(rc);
4006         } else if (KEY_IS(KEY_FIEMAP)) {
4007                 struct ptlrpc_request *req;
4008                 struct ll_user_fiemap *reply;
4009                 char *tmp;
4010                 int rc;
4011
4012                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
4013                                            &RQF_OST_GET_INFO_FIEMAP);
4014                 if (req == NULL)
4015                         RETURN(-ENOMEM);
4016
4017                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
4018                                      RCL_CLIENT, keylen);
4019                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
4020                                      RCL_CLIENT, *vallen);
4021                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
4022                                      RCL_SERVER, *vallen);
4023
4024                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
4025                 if (rc) {
4026                         ptlrpc_request_free(req);
4027                         RETURN(rc);
4028                 }
4029
4030                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
4031                 memcpy(tmp, key, keylen);
4032                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4033                 memcpy(tmp, val, *vallen);
4034
4035                 ptlrpc_request_set_replen(req);
4036                 rc = ptlrpc_queue_wait(req);
4037                 if (rc)
4038                         GOTO(out1, rc);
4039
4040                 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4041                 if (reply == NULL)
4042                         GOTO(out1, rc = -EPROTO);
4043
4044                 memcpy(val, reply, *vallen);
4045         out1:
4046                 ptlrpc_req_finished(req);
4047
4048                 RETURN(rc);
4049         }
4050
4051         RETURN(-EINVAL);
4052 }
4053
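/**
 * Finish MDS connection set-up on \a imp: connect the llog initiator for
 * the MDS-OST origin context and mark the import as pingable with
 * server-side timeouts enabled.
 */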
4054 static int osc_setinfo_mds_connect_import(struct obd_import *imp)
4055 {
4056         struct llog_ctxt *ctxt;
4057         int rc = 0;
4058         ENTRY;
4059
4060         ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
4061         if (ctxt) {
4062                 rc = llog_initiator_connect(ctxt);
4063                 llog_ctxt_put(ctxt);
4064         } else {
4065                 /* XXX return an error? skip setting below flags? */
4066         }
4067
4068         cfs_spin_lock(&imp->imp_lock);
4069         imp->imp_server_timeout = 1;
4070         imp->imp_pingable = 1;
4071         cfs_spin_unlock(&imp->imp_lock);
4072         CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
4073
4074         RETURN(rc);
4075 }
4076
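/* Reply callback for the KEY_MDS_CONN request: on success, complete the
 * MDS connection set-up on the import. */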
4077 static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
4078                                           struct ptlrpc_request *req,
4079                                           void *aa, int rc)
4080 {
4081         ENTRY;
4082         if (rc != 0)
4083                 RETURN(rc);
4084
4085         RETURN(osc_setinfo_mds_connect_import(req->rq_import));
4086 }
4087
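/**
 * Set information on the OST identified by \a key.  KEY_NEXT_ID,
 * KEY_CHECKSUM, KEY_SPTLRPC_CONF and KEY_FLUSH_CTX are handled locally;
 * everything else is packed into an OST_SET_INFO request, which is added
 * to \a set except for KEY_GRANT_SHRINK, which is queued on a ptlrpcd
 * thread instead.
 */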
4088 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
4089                               void *key, obd_count vallen, void *val,
4090                               struct ptlrpc_request_set *set)
4091 {
4092         struct ptlrpc_request *req;
4093         struct obd_device     *obd = exp->exp_obd;
4094         struct obd_import     *imp = class_exp2cliimp(exp);
4095         char                  *tmp;
4096         int                    rc;
4097         ENTRY;
4098
4099         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
4100
4101         if (KEY_IS(KEY_NEXT_ID)) {
4102                 obd_id new_val;
4103                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4104
4105                 if (vallen != sizeof(obd_id))
4106                         RETURN(-ERANGE);
4107                 if (val == NULL)
4108                         RETURN(-EINVAL);
4109
4113                 /* avoid a race between allocating a new object and the
4114                  * ll_sync thread setting the next id */
4115                 cfs_spin_lock(&oscc->oscc_lock);
4116                 new_val = *((obd_id*)val) + 1;
4117                 if (new_val > oscc->oscc_next_id)
4118                         oscc->oscc_next_id = new_val;
4119                 cfs_spin_unlock(&oscc->oscc_lock);
4120                 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
4121                        exp->exp_obd->obd_name,
4122                        obd->u.cli.cl_oscc.oscc_next_id);
4123
4124                 RETURN(0);
4125         }
4126
4127         if (KEY_IS(KEY_CHECKSUM)) {
4128                 if (vallen != sizeof(int))
4129                         RETURN(-EINVAL);
4130                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
4131                 RETURN(0);
4132         }
4133
4134         if (KEY_IS(KEY_SPTLRPC_CONF)) {
4135                 sptlrpc_conf_client_adapt(obd);
4136                 RETURN(0);
4137         }
4138
4139         if (KEY_IS(KEY_FLUSH_CTX)) {
4140                 sptlrpc_import_flush_my_ctx(imp);
4141                 RETURN(0);
4142         }
4143
4144         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
4145                 RETURN(-EINVAL);
4146
4147         /* We pass all other commands directly to the OST. Since nobody
4148            calls osc methods directly and everybody is supposed to go
4149            through LOV, we assume LOV has validated the values for us.
4150            The only recognised values so far are evict_by_nid and mds_conn.
4151            Even if something bad slips through, we'd get -EINVAL from the
4152            OST anyway. */
4153
4154         if (KEY_IS(KEY_GRANT_SHRINK))
4155                 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
4156         else
4157                 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
4158
4159         if (req == NULL)
4160                 RETURN(-ENOMEM);
4161
4162         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
4163                              RCL_CLIENT, keylen);
4164         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
4165                              RCL_CLIENT, vallen);
4166         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
4167         if (rc) {
4168                 ptlrpc_request_free(req);
4169                 RETURN(rc);
4170         }
4171
4172         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
4173         memcpy(tmp, key, keylen);
4174         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
4175         memcpy(tmp, val, vallen);
4176
4177         if (KEY_IS(KEY_MDS_CONN)) {
4178                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4179
4180                 oscc->oscc_oa.o_seq = (*(__u32 *)val);
4181                 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
4182                 LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
4183                 req->rq_no_delay = req->rq_no_resend = 1;
4184                 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
4185         } else if (KEY_IS(KEY_GRANT_SHRINK)) {
4186                 struct osc_grant_args *aa;
4187                 struct obdo *oa;
4188
4189                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
4190                 aa = ptlrpc_req_async_args(req);
4191                 OBDO_ALLOC(oa);
4192                 if (!oa) {
4193                         ptlrpc_req_finished(req);
4194                         RETURN(-ENOMEM);
4195                 }
4196                 *oa = ((struct ost_body *)val)->oa;
4197                 aa->aa_oa = oa;
4198                 req->rq_interpret_reply = osc_shrink_grant_interpret;
4199         }
4200
4201         ptlrpc_request_set_replen(req);
4202         if (!KEY_IS(KEY_GRANT_SHRINK)) {
4203                 LASSERT(set != NULL);
4204                 ptlrpc_set_add_req(set, req);
4205                 ptlrpc_check_set(NULL, set);
4206         } else {
4207                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        }
4208
4209         RETURN(0);
4210 }
4211
4212
4213 static struct llog_operations osc_size_repl_logops = {
4214         lop_cancel: llog_obd_repl_cancel
4215 };
4216
4217 static struct llog_operations osc_mds_ost_orig_logops;
4218
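/* Set up the two llog contexts used on the MDS side of the OSC: the
 * MDS-OST origin context and the size replication context.  If the second
 * setup fails, the first context is cleaned up again. */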
4219 static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4220                            struct obd_device *tgt, struct llog_catid *catid)
4221 {
4222         int rc;
4223         ENTRY;
4224
4225         rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
4226                         &catid->lci_logid, &osc_mds_ost_orig_logops);
4227         if (rc) {
4228                 CERROR("llog_setup failed for LLOG_MDS_OST_ORIG_CTXT: rc = %d\n", rc);
4229                 GOTO(out, rc);
4230         }
4231
4232         rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
4233                         NULL, &osc_size_repl_logops);
4234         if (rc) {
4235                 struct llog_ctxt *ctxt =
4236                         llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4237                 if (ctxt)
4238                         llog_cleanup(ctxt);
4239                 CERROR("llog_setup failed for LLOG_SIZE_REPL_CTXT: rc = %d\n", rc);
4240         }
4241         GOTO(out, rc);
4242 out:
4243         if (rc) {
4244                 CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
4245                        obd->obd_name, tgt->obd_name, catid, rc);
4246                 CERROR("logid "LPX64":0x%x\n",
4247                        catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
4248         }
4249         return rc;
4250 }
4251
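/* Read this target's catalog id from the CATLIST file on \a disk_obd,
 * initialize the llog contexts with it and write the catalog id back,
 * all under the olg_cat_processing mutex. */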
4252 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4253                          struct obd_device *disk_obd, int *index)
4254 {
4255         struct llog_catid catid;
4256         static char name[32] = CATLIST;
4257         int rc;
4258         ENTRY;
4259
4260         LASSERT(olg == &obd->obd_olg);
4261
4262         cfs_mutex_lock(&olg->olg_cat_processing);
4263         rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
4264         if (rc) {
4265                 CERROR("llog_get_cat_list failed: rc = %d\n", rc);
4266                 GOTO(out, rc);
4267         }
4268
4269         CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
4270                obd->obd_name, *index, catid.lci_logid.lgl_oid,
4271                catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
4272
4273         rc = __osc_llog_init(obd, olg, disk_obd, &catid);
4274         if (rc) {
4275                 CERROR("__osc_llog_init failed: rc = %d\n", rc);
4276                 GOTO(out, rc);
4277         }
4278
4279         rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
4280         if (rc) {
4281                 CERROR("llog_put_cat_list failed: rc = %d\n", rc);
4282                 GOTO(out, rc);
4283         }
4284
4285  out:
4286         cfs_mutex_unlock(&olg->olg_cat_processing);
4287
4288         return rc;
4289 }
4290
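/* Tear down both llog contexts set up by __osc_llog_init(), returning the
 * first non-zero cleanup error. */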
4291 static int osc_llog_finish(struct obd_device *obd, int count)
4292 {
4293         struct llog_ctxt *ctxt;
4294         int rc = 0, rc2 = 0;
4295         ENTRY;
4296
4297         ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4298         if (ctxt)
4299                 rc = llog_cleanup(ctxt);
4300
4301         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4302         if (ctxt)
4303                 rc2 = llog_cleanup(ctxt);
4304         if (!rc)
4305                 rc = rc2;
4306
4307         RETURN(rc);
4308 }
4309
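/* On reconnect, request a grant matching what we believe we already hold
 * (available + dirty), or two full RPCs worth of grant if that is zero,
 * and reset the lost-grant accounting. */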
4310 static int osc_reconnect(const struct lu_env *env,
4311                          struct obd_export *exp, struct obd_device *obd,
4312                          struct obd_uuid *cluuid,
4313                          struct obd_connect_data *data,
4314                          void *localdata)
4315 {
4316         struct client_obd *cli = &obd->u.cli;
4317
4318         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
4319                 long lost_grant;
4320
4321                 client_obd_list_lock(&cli->cl_loi_list_lock);
4322                 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
4323                                 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
4324                 lost_grant = cli->cl_lost_grant;
4325                 cli->cl_lost_grant = 0;
4326                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4327
4328                 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
4329                        "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
4330                        cli->cl_avail_grant, cli->cl_dirty, lost_grant);
4331                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
4332                        " ocd_grant: %d\n", data->ocd_connect_flags,
4333                        data->ocd_version, data->ocd_grant);
4334         }
4335
4336         RETURN(0);
4337 }
4338
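/* Flush outstanding size-change llog cancels to the target on the last
 * disconnect, tear down the export, and only then drop the client from
 * the grant shrink list (see the BUG18662 note below). */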
4339 static int osc_disconnect(struct obd_export *exp)
4340 {
4341         struct obd_device *obd = class_exp2obd(exp);
4342         struct llog_ctxt  *ctxt;
4343         int rc;
4344
4345         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4346         if (ctxt) {
4347                 if (obd->u.cli.cl_conn_count == 1) {
4348                         /* Flush any remaining cancel messages out to the
4349                          * target */
4350                         llog_sync(ctxt, exp);
4351                 }
4352                 llog_ctxt_put(ctxt);
4353         } else {
4354                 CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
4355                        obd);
4356         }
4357
4358         rc = client_disconnect_export(exp);
4359         /**
4360          * Initially we put del_shrink_grant before disconnect_export, but
4361          * that causes the following problem if setup (connect) and cleanup
4362          * (disconnect) are tangled together:
4363          *      connect p1                     disconnect p2
4364          *   ptlrpc_connect_import
4365          *     ...............               class_manual_cleanup
4366          *                                     osc_disconnect
4367          *                                     del_shrink_grant
4368          *   ptlrpc_connect_interpret
4369          *     init_grant_shrink
4370          *   add this client to shrink list
4371          *                                      cleanup_osc
4372          * Bang! The pinger triggers the shrink.
4373          * So the osc should only be removed from the shrink list once we
4374          * are sure the import has been destroyed. BUG18662
4375          */
4376         if (obd->u.cli.cl_import == NULL)
4377                 osc_del_shrink_grant(&obd->u.cli);
4378         return rc;
4379 }
4380
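/**
 * React to import state changes: reset grants on disconnect, fail cached
 * pages and prune local DLM locks on invalidation, clear the no-space
 * flags of MDS OSCs on (re)activation, apply connect data on IMP_EVENT_OCD
 * and notify the obd observer of each transition.
 */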
4381 static int osc_import_event(struct obd_device *obd,
4382                             struct obd_import *imp,
4383                             enum obd_import_event event)
4384 {
4385         struct client_obd *cli;
4386         int rc = 0;
4387
4388         ENTRY;
4389         LASSERT(imp->imp_obd == obd);
4390
4391         switch (event) {
4392         case IMP_EVENT_DISCON: {
4393                 /* Only do this on the MDS OSCs */
4394                 if (imp->imp_server_timeout) {
4395                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4396
4397                         cfs_spin_lock(&oscc->oscc_lock);
4398                         oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
4399                         cfs_spin_unlock(&oscc->oscc_lock);
4400                 }
4401                 cli = &obd->u.cli;
4402                 client_obd_list_lock(&cli->cl_loi_list_lock);
4403                 cli->cl_avail_grant = 0;
4404                 cli->cl_lost_grant = 0;
4405                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4406                 break;
4407         }
4408         case IMP_EVENT_INACTIVE: {
4409                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
4410                 break;
4411         }
4412         case IMP_EVENT_INVALIDATE: {
4413                 struct ldlm_namespace *ns = obd->obd_namespace;
4414                 struct lu_env         *env;
4415                 int                    refcheck;
4416
4417                 env = cl_env_get(&refcheck);
4418                 if (!IS_ERR(env)) {
4419                         /* Reset grants */
4420                         cli = &obd->u.cli;
4421                         client_obd_list_lock(&cli->cl_loi_list_lock);
4422                         /* all pages will fail their RPCs due to the
4423                          * invalid import */
4424                         osc_check_rpcs(env, cli);
4425                         client_obd_list_unlock(&cli->cl_loi_list_lock);
4426
4427                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
4428                         cl_env_put(env, &refcheck);
4429                 } else
4430                         rc = PTR_ERR(env);
4431                 break;
4432         }
4433         case IMP_EVENT_ACTIVE: {
4434                 /* Only do this on the MDS OSCs */
4435                 if (imp->imp_server_timeout) {
4436                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4437
4438                         cfs_spin_lock(&oscc->oscc_lock);
4439                         oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
4440                                               OSCC_FLAG_NOSPC_BLK);
4441                         cfs_spin_unlock(&oscc->oscc_lock);
4442                 }
4443                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
4444                 break;
4445         }
4446         case IMP_EVENT_OCD: {
4447                 struct obd_connect_data *ocd = &imp->imp_connect_data;
4448
4449                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
4450                         osc_init_grant(&obd->u.cli, ocd);
4451
4452                 /* See bug 7198 */
4453                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
4454                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
4455
4456                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
4457                 break;
4458         }
4459         case IMP_EVENT_DEACTIVATE: {
4460                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
4461                 break;
4462         }
4463         case IMP_EVENT_ACTIVATE: {
4464                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
4465                 break;
4466         }
4467         default:
4468                 CERROR("Unknown import event %d\n", event);
4469                 LBUG();
4470         }
4471         RETURN(rc);
4472 }
4473
4474 /**
4475  * Determine whether the lock can be canceled before replaying the lock
4476  * during recovery, see bug16774 for detailed information.
4477  *
4478  * \retval zero the lock can't be canceled
4479  * \retval other ok to cancel
4480  */
4481 static int osc_cancel_for_recovery(struct ldlm_lock *lock)
4482 {
4483         check_res_locked(lock->l_resource);
4484
4485         /*
4486          * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
4487          *
4488          * XXX as a future improvement, we can also cancel unused write lock
4489          * if it doesn't have dirty data and active mmaps.
4490          */
4491         if (lock->l_resource->lr_type == LDLM_EXTENT &&
4492             (lock->l_granted_mode == LCK_PR ||
4493              lock->l_granted_mode == LCK_CR) &&
4494             (osc_dlm_lock_pageref(lock) == 0))
4495                 RETURN(1);
4496
4497         RETURN(0);
4498 }
4499
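/**
 * Standard obd setup: take a ptlrpcd reference, set up the client import
 * and the writeback work item, register lprocfs entries, initialize the
 * object creator, pre-allocate a request pool so BRW resends cannot be
 * starved of requests, and register the cancel-for-recovery callback.
 */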
4500 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
4501 {
4502         struct client_obd *cli = &obd->u.cli;
4503         int rc;
4504         ENTRY;
4505
4507         rc = ptlrpcd_addref();
4508         if (rc)
4509                 RETURN(rc);
4510
4511         rc = client_obd_setup(obd, lcfg);
4512         if (rc == 0) {
4513                 void *handler;
4514                 handler = ptlrpcd_alloc_work(cli->cl_import,
4515                                              brw_queue_work, cli);
4516                 if (!IS_ERR(handler))
4517                         cli->cl_writeback_work = handler;
4518                 else
4519                         rc = PTR_ERR(handler);
4520         }
4521
4522         if (rc == 0) {
4523                 struct lprocfs_static_vars lvars = { 0 };
4524
4525                 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
4526                 lprocfs_osc_init_vars(&lvars);
4527                 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
4528                         lproc_osc_attach_seqstat(obd);
4529                         sptlrpc_lprocfs_cliobd_attach(obd);
4530                         ptlrpc_lprocfs_register_obd(obd);
4531                 }
4532
4533                 oscc_init(obd);
4534                 /* We need to allocate a few extra requests, because
4535                    brw_interpret tries to create new requests before freeing
4536                    previous ones. Ideally we want 2x max_rpcs_in_flight
4537                    reserved, but that might waste too much RAM in practice,
4538                    so reserving 2 extra is a guess that should still work. */
4539                 cli->cl_import->imp_rq_pool =
4540                         ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
4541                                             OST_MAXREQSIZE,
4542                                             ptlrpc_add_rqs_to_pool);
4543
4544                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
4545
4546                 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
4547         }
4548
4549         if (rc)
4550                 ptlrpcd_decref();
4551         RETURN(rc);
4552 }
4553
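/* Two-stage pre-cleanup: OBD_CLEANUP_EARLY deactivates the import and
 * stops pinging it; OBD_CLEANUP_EXPORTS destroys the writeback work item,
 * the import, the lprocfs entries and the llog contexts. */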
4554 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
4555 {
4556         int rc = 0;
4557         ENTRY;
4558
4559         switch (stage) {
4560         case OBD_CLEANUP_EARLY: {
4561                 struct obd_import *imp;
4562                 imp = obd->u.cli.cl_import;
4563                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
4564                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
4565                 ptlrpc_deactivate_import(imp);
4566                 cfs_spin_lock(&imp->imp_lock);
4567                 imp->imp_pingable = 0;
4568                 cfs_spin_unlock(&imp->imp_lock);
4569                 break;
4570         }
4571         case OBD_CLEANUP_EXPORTS: {
4572                 struct client_obd *cli = &obd->u.cli;
4573                 /* LU-464
4574                  * for echo client, export may be on zombie list, wait for
4575                  * zombie thread to cull it, because cli.cl_import will be
4576                  * cleared in client_disconnect_export():
4577                  *   class_export_destroy() -> obd_cleanup() ->
4578                  *   echo_device_free() -> echo_client_cleanup() ->
4579                  *   obd_disconnect() -> osc_disconnect() ->
4580                  *   client_disconnect_export()
4581                  */
4582                 obd_zombie_barrier();
4583                 if (cli->cl_writeback_work) {
4584                         ptlrpcd_destroy_work(cli->cl_writeback_work);
4585                         cli->cl_writeback_work = NULL;
4586                 }
4587                 obd_cleanup_client_import(obd);
4588                 ptlrpc_lprocfs_unregister_obd(obd);
4589                 lprocfs_obd_cleanup(obd);
4590                 rc = obd_llog_finish(obd, 0);
4591                 if (rc != 0)
4592                         CERROR("failed to cleanup llogging subsystems\n");
4593                 break;
4594         }
4595         }
4596         RETURN(rc);
4597 }
4598
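/* Final cleanup: release the quota cache, the generic client state and
 * the ptlrpcd reference taken in osc_setup(). */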
4599 int osc_cleanup(struct obd_device *obd)
4600 {
4601         int rc;
4602
4603         ENTRY;
4604
4605         /* free memory of osc quota cache */
4606         osc_quota_cleanup(obd);
4607
4608         rc = client_obd_cleanup(obd);
4609
4610         ptlrpcd_decref();
4611         RETURN(rc);
4612 }
4613
4614 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
4615 {
4616         struct lprocfs_static_vars lvars = { 0 };
4617         int rc = 0;
4618
4619         lprocfs_osc_init_vars(&lvars);
4620
4621         switch (lcfg->lcfg_command) {
4622         default:
4623                 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4624                                               lcfg, obd);
4625                 if (rc > 0)
4626                         rc = 0;
4627                 break;
4628         }
4629
4630         return(rc);
4631 }
4632
4633 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4634 {
4635         return osc_process_config_base(obd, buf);
4636 }
4637
4638 struct obd_ops osc_obd_ops = {
4639         .o_owner                = THIS_MODULE,
4640         .o_setup                = osc_setup,
4641         .o_precleanup           = osc_precleanup,
4642         .o_cleanup              = osc_cleanup,
4643         .o_add_conn             = client_import_add_conn,
4644         .o_del_conn             = client_import_del_conn,
4645         .o_connect              = client_connect_import,
4646         .o_reconnect            = osc_reconnect,
4647         .o_disconnect           = osc_disconnect,
4648         .o_statfs               = osc_statfs,
4649         .o_statfs_async         = osc_statfs_async,
4650         .o_packmd               = osc_packmd,
4651         .o_unpackmd             = osc_unpackmd,
4652         .o_precreate            = osc_precreate,
4653         .o_create               = osc_create,
4654         .o_create_async         = osc_create_async,
4655         .o_destroy              = osc_destroy,
4656         .o_getattr              = osc_getattr,
4657         .o_getattr_async        = osc_getattr_async,
4658         .o_setattr              = osc_setattr,
4659         .o_setattr_async        = osc_setattr_async,
4660         .o_brw                  = osc_brw,
4661         .o_punch                = osc_punch,
4662         .o_sync                 = osc_sync,
4663         .o_enqueue              = osc_enqueue,
4664         .o_change_cbdata        = osc_change_cbdata,
4665         .o_find_cbdata          = osc_find_cbdata,
4666         .o_cancel               = osc_cancel,
4667         .o_cancel_unused        = osc_cancel_unused,
4668         .o_iocontrol            = osc_iocontrol,
4669         .o_get_info             = osc_get_info,
4670         .o_set_info_async       = osc_set_info_async,
4671         .o_import_event         = osc_import_event,
4672         .o_llog_init            = osc_llog_init,
4673         .o_llog_finish          = osc_llog_finish,
4674         .o_process_config       = osc_process_config,
4675         .o_quotactl             = osc_quotactl,
4676         .o_quotacheck           = osc_quotacheck,
4677         .o_quota_adjust_qunit   = osc_quota_adjust_qunit,
4678 };
4679
4680 extern struct lu_kmem_descr osc_caches[];
4681 extern cfs_spinlock_t       osc_ast_guard;
4682 extern cfs_lock_class_key_t osc_ast_guard_class;
4683
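/* Module initialization: register caches, quota hooks and the OSC device
 * type, and clone the lvfs llog operations with origin-specific methods. */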
4684 int __init osc_init(void)
4685 {
4686         struct lprocfs_static_vars lvars = { 0 };
4687         int rc;
4688         ENTRY;
4689
4690         /* print the address of _any_ initialized kernel symbol from this
4691          * module, to allow debugging with a gdb that doesn't support data
4692          * symbols from modules. */
4693         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
4694
4695         rc = lu_kmem_init(osc_caches);
        if (rc)
                RETURN(rc);
4696
4697         lprocfs_osc_init_vars(&lvars);
4698
4699         osc_quota_init();
4700         rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4701                                  LUSTRE_OSC_NAME, &osc_device_type);
4702         if (rc) {
4703                 lu_kmem_fini(osc_caches);
4704                 RETURN(rc);
4705         }
4706
4707         cfs_spin_lock_init(&osc_ast_guard);
4708         cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
4709
4710         osc_mds_ost_orig_logops = llog_lvfs_ops;
4711         osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
4712         osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
4713         osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
4714         osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
4715
4716         RETURN(rc);
4717 }
4718
4719 #ifdef __KERNEL__
4720 static void /*__exit*/ osc_exit(void)
4721 {
4722         lu_device_type_fini(&osc_device_type);
4723
4724         osc_quota_exit();
4725         class_unregister_type(LUSTRE_OSC_NAME);
4726         lu_kmem_fini(osc_caches);
4727 }
4728
4729 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4730 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4731 MODULE_LICENSE("GPL");
4732
4733 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
4734 #endif