/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>

#ifdef  __CYGWIN__
# include <ctype.h>
#endif

#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include "osc_internal.h"

static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc);
static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli,
                            int ptlrpc);
int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;
        ENTRY;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                RETURN(lmm_size);

        if (*lmmp && !lsm) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (!*lmmp) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (!*lmmp)
                        RETURN(-ENOMEM);
        }

        if (lsm) {
                LASSERT(lsm->lsm_object_id);
                LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
        }

        RETURN(lmm_size);
}

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        struct obd_import *imp = class_exp2cliimp(exp);
        ENTRY;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                if ((*lsmp)->lsm_oinfo[0] == NULL) {
                        OBD_FREE(*lsmp, lsm_size);
                        RETURN(-ENOMEM);
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
                (*lsmp)->lsm_object_seq = le64_to_cpu(lmm->lmm_object_seq);
                LASSERT((*lsmp)->lsm_object_id);
                LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
        }

        if (imp != NULL &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}

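/* Copy the capability @capa into the RMF_CAPA1 field of @req and flag its
 * presence in the request body via OBD_MD_FLOSSCAPA.  A NULL @capa is
 * silently ignored. */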
static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
{
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        if (!capa)
                return;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        LASSERT(c);
        capa_cpy(c, oc);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");
}

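/* Fill the OST request body from @oinfo: pack the obdo in wire format and
 * attach the OSS capability, if any. */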
static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);
}

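/* Shrink the RMF_CAPA1 field to zero length when no capability is being
 * sent; otherwise the size already reserved by the request format is
 * kept. */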
static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
                                     struct obd_capa *oc)
{
        if (oc == NULL)
                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        else
                /* it is already calculated as sizeof struct obd_capa */
                ;
}

static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

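/* Queue an OST_GETATTR request on @set.  The reply is handled by
 * osc_getattr_interpret(), which passes the result to oinfo->oi_cb_up(). */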
static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        /* This should really be sent by the OST */
        oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
 out:
        ptlrpc_req_finished(req);
        return rc;
}

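/* Synchronous OST_SETATTR: pack @oinfo, wait for the reply, and copy the
 * returned attributes back into oinfo->oi_oa. */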
static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
                       struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(sa->sa_oa, &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

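/* Asynchronous OST_SETATTR.  With a NULL @rqset the request is handed to
 * ptlrpcd and the reply is ignored; otherwise osc_setattr_interpret() runs
 * @upcall with @cookie on completion. */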
int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* Do MDS-to-OST setattr asynchronously. */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}

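/* Create an object on the OST.  If the caller did not pass in striping
 * metadata via @ea, a default single-stripe lsm is allocated.  On success
 * the returned object id/seq are copied into the lsm, and the llog cookie
 * for the create transaction, if any, is saved in @oti. */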
int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct lov_stripe_md  *lsm;
        int                    rc;
        ENTRY;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        lustre_get_wire_obdo(oa, &body->oa);

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way.
         */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_seq = oa->o_seq;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        *oti->oti_logcookies = oa->o_lcookie;
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}

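/* Punch (truncate) an object.  The extent to punch travels in the
 * o_size/o_blocks fields of the obdo, as set up by the caller (see
 * osc_punch() below).  Completion runs @upcall via
 * osc_setattr_interpret(). */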
int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        struct ost_body         *body;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa     = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
                     struct obd_trans_info *oti,
                     struct ptlrpc_request_set *rqset)
{
        oinfo->oi_oa->o_size   = oinfo->oi_policy.l_extent.start;
        oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
        oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        return osc_punch_base(exp, oinfo,
                              oinfo->oi_cb_up, oinfo, rqset);
}

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_async_args *aa = arg;
        struct ost_body *body;
        ENTRY;

        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *aa->aa_oi->oi_oa = body->oa;
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

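/* Queue an OST_SYNC request for the byte range [start, end] on @set.  As
 * with punch, the range is carried in the o_size/o_blocks fields of the
 * obdo. */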
static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
                    obd_size start, obd_size end,
                    struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        if (!oinfo->oi_oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

/* Find and locally cancel the locks matching @mode in the resource named
 * after @oa's object id/seq.  Matched locks are added to the @cancels
 * list; returns the number of locks added. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   cfs_list_t *cancels,
                                   ldlm_mode_t mode, int lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        cfs_atomic_dec(&cli->cl_destroy_in_flight);
        cfs_waitq_signal(&cli->cl_destroy_waitq);
        return 0;
}

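/* Try to reserve a slot for one more in-flight destroy RPC.  The counter
 * is bumped optimistically and rolled back if that would exceed
 * cl_max_rpcs_in_flight; the re-check after the decrement catches a slot
 * freed between the two atomic operations.  Callers that get 0 back are
 * expected to sleep on cl_destroy_waitq, as osc_destroy() below does:
 *
 *     if (!osc_can_send_destroy(cli))
 *             l_wait_event_exclusive(cli->cl_destroy_waitq,
 *                                    osc_can_send_destroy(cli), &lwi);
 */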
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                cfs_waitq_signal(&cli->cl_destroy_waitq);
        }
        return 0;
}

/* Destroy requests can always be async on the client, and we don't really
 * care about the return code, since the client can do nothing about a
 * destroy failure anyway.
 * When the MDS unlinks a filename, it saves the file objects into a
 * recovery llog; these object records are cancelled once the OST reports
 * they were destroyed and synced to disk (i.e. the transaction committed).
 * If the client dies, or the OST is down when the object should be
 * destroyed, the records are not cancelled; when the OST next reconnects
 * to the MDS, it retrieves the llog unlink records and sends the log
 * cancellation cookies to the MDS after committing the destroy
 * transactions. */
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti,
                       struct obd_export *md_export, void *capa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        CFS_LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        osc_pack_capa(req, body, (struct obd_capa *)capa);
        ptlrpc_request_set_replen(req);

        /* don't throttle destroy RPCs for the MDT */
        if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of in-flight destroy RPCs
                         * drops below cl_max_rpcs_in_flight.
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli), &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        RETURN(0);
}

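/* Advertise the client cache state to the OST: how much is currently
 * dirty, how much more we may want to dirty (o_undirty), the grant we
 * hold, and any grant we lost track of (o_dropped).  The counters are
 * sampled under cl_loi_list_lock for a consistent snapshot. */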
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else if (cfs_atomic_read(&obd_dirty_pages) -
                   cfs_atomic_read(&obd_dirty_transit_pages) >
                   obd_max_dirty_pages + 1) {
                /* The cfs_atomic_read() allowing the cfs_atomic_inc() are
                 * not covered by a lock thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
                CERROR("dirty %d - %d > system dirty_max %d\n",
                       cfs_atomic_read(&obd_dirty_pages),
                       cfs_atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else {
                long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) *
                                (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

static void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
               cli->cl_next_shrink_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
{
        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        cfs_atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += CFS_PAGE_SIZE;
        cli->cl_avail_grant -= CFS_PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
               CFS_PAGE_SIZE, pga, pga->pg);
        LASSERT(cli->cl_avail_grant >= 0);
        osc_update_next_shrink(cli);
}

/* the companion to osc_consume_write_grant, called when a brw has completed.
 * must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga, int sent)
{
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
        ENTRY;

        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
        }

        pga->flag &= ~OBD_BRW_FROM_GRANT;
        cfs_atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= CFS_PAGE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
                cfs_atomic_dec(&obd_dirty_transit_pages);
                cli->cl_dirty_transit -= CFS_PAGE_SIZE;
        }
        if (!sent) {
                /* Reclaim grant from truncated pages.  This solves the
                 * write-then-truncate case where all the grant would
                 * otherwise be lost (moved to lost_grant).  For a VFS write
                 * the problem could be avoided with a sync write, but that
                 * is not an option for page_mkwrite(), because grant must
                 * be allocated before a page becomes dirty. */
                if (cli->cl_avail_grant < PTLRPC_MAX_BRW_SIZE)
                        cli->cl_avail_grant += CFS_PAGE_SIZE;
                else
                        cli->cl_lost_grant += CFS_PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole block on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check. */
                int offset = pga->off & ~CFS_PAGE_MASK;
                int count = pga->count + (offset & (blocksize - 1));
                int end = (offset + pga->count) & (blocksize - 1);
                if (end)
                        count += blocksize - end;

                cli->cl_lost_grant += CFS_PAGE_SIZE - count;
                CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
                       CFS_PAGE_SIZE - count, cli->cl_lost_grant,
                       cli->cl_avail_grant, cli->cl_dirty);
        }

        EXIT;
}

static unsigned long rpcs_in_flight(struct client_obd *cli)
{
        return cli->cl_r_in_flight + cli->cl_w_in_flight;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        cfs_list_t *l, *tmp;
        struct osc_cache_waiter *ocw;

        ENTRY;
        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
                   (cfs_atomic_read(&obd_dirty_pages) + 1 >
                    obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);
                        return;
                }

                /* if still dirty cache but no grant wait for pending RPCs that
                 * may yet return us some grant before doing sync writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);
                        return;
                }

                ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
                cfs_list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli,
                                                &ocw->ocw_oap->oap_brw_page);
                }

                CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld\n",
                       ocw, ocw->ocw_oap, cli->cl_avail_grant);

                cfs_waitq_signal(&ocw->ocw_waitq);
        }

        EXIT;
}

static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
                              void *key, obd_count vallen, void *val,
                              struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBDO_FREE(oa);
        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        long target = (cli->cl_max_rpcs_in_flight + 1) *
                      cli->cl_max_pages_per_rpc;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target)
                target = cli->cl_max_pages_per_rpc;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target);
}

int osc_shrink_grant_to_target(struct client_obd *cli, long target)
{
        int    rc = 0;
        struct ost_body     *body;
        ENTRY;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target < cli->cl_max_pages_per_rpc)
                target = cli->cl_max_pages_per_rpc;

        if (target >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target;
        cli->cl_avail_grant = target;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
        OBD_FREE_PTR(body);
        RETURN(rc);
}

#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
static int osc_should_shrink_grant(struct client_obd *client)
{
        cfs_time_t time = cfs_time_current();
        cfs_time_t next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > GRANT_SHRINK_LIMIT)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        cfs_list_for_each_entry(client, &item->ti_obd_list,
                                cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}

static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                        client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s \n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}

static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}

static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it is the new avail_grant amount, and cl_dirty will
         * drop to 0 as in-flight RPCs fail out; otherwise it is
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we are evicted but imp_state has
         * already left EVICTED, then cl_dirty must already be 0.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0, the OSS is probably not running"
                      " with patch from bug20278 (%ld) \n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
                /* workaround for 1.6 servers which do not have
                 * the patch from bug20278 */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
               cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            cfs_list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}

/* We assume this OSC got a short read because it read beyond the end of a
 * stripe file; i.e. Lustre is reading a sparse file via the LOV, and it
 * _knows_ it is reading inside the file, it's just that this stripe was
 * never written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = cfs_kmap(pga[i]->pg) +
                                (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        cfs_kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                cfs_kunmap(pga[i]->pg);
                i++;
        }
}

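/* Verify the per-niobuf return codes in a BRW_WRITE reply: any negative
 * rc is passed back to the caller, a positive non-zero rc is a protocol
 * error, and the number of bytes moved by the bulk must match what was
 * requested. */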
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return(-EPROTO);
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return(remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return(-EPROTO);
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return(-EPROTO);
        }

        return (0);
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT| OBD_BRW_NOCACHE|
                                  OBD_BRW_SYNC|OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at http://bugs.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

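/* Compute the bulk checksum over the first @nob bytes of @pga.  The
 * OBD_FAIL_OSC_CHECKSUM_* fail locks deliberately corrupt the data (for
 * reads) or the checksum (for writes) to exercise the checksum detection
 * paths. */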
static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;

        LASSERT(pg_count > 0);
        cksum = init_checksum(cksum_type);
        while (nob > 0 && pg_count > 0) {
                unsigned char *ptr = cfs_kmap(pga[i]->pg);
                int off = pga[i]->off & ~CFS_PAGE_MASK;
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        memcpy(ptr + off, "bad1", min(4, nob));
                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                cfs_kunmap(pga[i]->pg);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                               off, cksum);

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return fini_checksum(cksum, cksum_type);
}

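/* Build a bulk read/write request: one remote niobuf per contiguous run
 * of pages (see can_merge_pages()), a bulk descriptor covering every
 * page, cache/grant announcements piggy-backed on the request body, and
 * a bulk checksum when enabled and not already provided by the sptlrpc
 * flavor. */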
static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa, int reserve,
                                int resend)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule      *pill;
        struct brw_page *pg_prev;

        ENTRY;
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_PUT_SINK, OST_BULK_PORTAL);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == CFS_PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: "LPU64", count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
#else
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u\n", i, page_count);
#endif
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len    = pg->count;
                        niobuf->flags  = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        if (ocapa && reserve)
                aa->aa_ocapa = capa_get(ocapa);

        *reqp = req;
        RETURN(0);

 out:
        ptlrpc_req_finished(req);
1482         RETURN(rc);
1483 }
1484
1485 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1486                                 __u32 client_cksum, __u32 server_cksum, int nob,
1487                                 obd_count page_count, struct brw_page **pga,
1488                                 cksum_type_t client_cksum_type)
1489 {
1490         __u32 new_cksum;
1491         char *msg;
1492         cksum_type_t cksum_type;
1493
1494         if (server_cksum == client_cksum) {
1495                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1496                 return 0;
1497         }
1498
1499         cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1500                                        oa->o_flags : 0);
1501         new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1502                                       cksum_type);
1503
1504         if (cksum_type != client_cksum_type)
1505                 msg = "the server did not use the checksum type specified in "
1506                       "the original request - likely a protocol problem";
1507         else if (new_cksum == server_cksum)
1508                 msg = "changed on the client after we checksummed it - "
1509                       "likely false positive due to mmap IO (bug 11742)";
1510         else if (new_cksum == client_cksum)
1511                 msg = "changed in transit before arrival at OST";
1512         else
1513                 msg = "changed in transit AND doesn't match the original - "
1514                       "likely false positive due to mmap IO (bug 11742)";
1515
1516         LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1517                            " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
1518                            msg, libcfs_nid2str(peer->nid),
1519                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1520                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1521                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1522                            oa->o_id,
1523                            oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
1524                            pga[0]->off,
1525                            pga[page_count-1]->off + pga[page_count-1]->count - 1);
1526         CERROR("original client csum %x (type %x), server csum %x (type %x), "
1527                "client csum now %x\n", client_cksum, client_cksum_type,
1528                server_cksum, cksum_type, new_cksum);
1529         return 1;
1530 }
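
/*
 * A summary of the diagnosis above, with client_cksum the checksum sent in
 * the original request, server_cksum the checksum the OST computed, and
 * new_cksum the checksum recomputed here over the same pages:
 *
 *   new_cksum == server_cksum  ->  pages changed on the client after they
 *                                  were checksummed (e.g. mmap IO)
 *   new_cksum == client_cksum  ->  data changed in transit before the OST
 *   no match at all            ->  changed in transit AND differs from the
 *                                  original; likely mmap IO as well
 */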
1531
1532 /* Note: rc enters this function as the number of bytes transferred */
1533 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1534 {
1535         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1536         const lnet_process_id_t *peer =
1537                         &req->rq_import->imp_connection->c_peer;
1538         struct client_obd *cli = aa->aa_cli;
1539         struct ost_body *body;
1540         __u32 client_cksum = 0;
1541         ENTRY;
1542
1543         if (rc < 0 && rc != -EDQUOT) {
1544                 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1545                 RETURN(rc);
1546         }
1547
1548         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1549         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1550         if (body == NULL) {
1551                 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1552                 RETURN(-EPROTO);
1553         }
1554
1555         /* set/clear the over-quota flag for a uid/gid */
1556         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1557             body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1558                 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1559
1560                 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1561                        body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1562                        body->oa.o_flags);
1563                 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1564         }
1565
1566         osc_update_grant(cli, body);
1567
1568         if (rc < 0)
1569                 RETURN(rc);
1570
1571         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1572                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1573
1574         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1575                 if (rc > 0) {
1576                         CERROR("Unexpected +ve rc %d\n", rc);
1577                         RETURN(-EPROTO);
1578                 }
1579                 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1580
1581                 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1582                         RETURN(-EAGAIN);
1583
1584                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1585                     check_write_checksum(&body->oa, peer, client_cksum,
1586                                          body->oa.o_cksum, aa->aa_requested_nob,
1587                                          aa->aa_page_count, aa->aa_ppga,
1588                                          cksum_type_unpack(aa->aa_oa->o_flags)))
1589                         RETURN(-EAGAIN);
1590
1591                 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1592                                      aa->aa_page_count, aa->aa_ppga);
1593                 GOTO(out, rc);
1594         }
1595
1596         /* The rest of this function executes only for OST_READs */
1597
1598         /* if unwrap_bulk failed, return -EAGAIN to retry */
1599         rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1600         if (rc < 0)
1601                 GOTO(out, rc = -EAGAIN);
1602
1603         if (rc > aa->aa_requested_nob) {
1604                 CERROR("Unexpected rc %d (%d requested)\n", rc,
1605                        aa->aa_requested_nob);
1606                 RETURN(-EPROTO);
1607         }
1608
1609         if (rc != req->rq_bulk->bd_nob_transferred) {
1610                 CERROR("Unexpected rc %d (%d transferred)\n",
1611                        rc, req->rq_bulk->bd_nob_transferred);
1612                 RETURN(-EPROTO);
1613         }
1614
1615         if (rc < aa->aa_requested_nob)
1616                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1617
1618         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1619                 static int cksum_counter;
1620                 __u32      server_cksum = body->oa.o_cksum;
1621                 char      *via;
1622                 char      *router;
1623                 cksum_type_t cksum_type;
1624
1625                 cksum_type = cksum_type_unpack(body->oa.o_valid &OBD_MD_FLFLAGS?
1626                                                body->oa.o_flags : 0);
1627                 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1628                                                  aa->aa_ppga, OST_READ,
1629                                                  cksum_type);
1630
1631                 if (peer->nid == req->rq_bulk->bd_sender) {
1632                         via = router = "";
1633                 } else {
1634                         via = " via ";
1635                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1636                 }
1637
1638                 if (server_cksum == ~0 && rc > 0) {
1639                         CERROR("Protocol error: server %s set the 'checksum' "
1640                                "bit, but didn't send a checksum.  Not fatal, "
1641                                "but please notify on http://bugs.whamcloud.com/\n",
1642                                libcfs_nid2str(peer->nid));
1643                 } else if (server_cksum != client_cksum) {
1644                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1645                                            "%s%s%s inode "DFID" object "
1646                                            LPU64"/"LPU64" extent "
1647                                            "["LPU64"-"LPU64"]\n",
1648                                            req->rq_import->imp_obd->obd_name,
1649                                            libcfs_nid2str(peer->nid),
1650                                            via, router,
1651                                            body->oa.o_valid & OBD_MD_FLFID ?
1652                                                 body->oa.o_parent_seq : (__u64)0,
1653                                            body->oa.o_valid & OBD_MD_FLFID ?
1654                                                 body->oa.o_parent_oid : 0,
1655                                            body->oa.o_valid & OBD_MD_FLFID ?
1656                                                 body->oa.o_parent_ver : 0,
1657                                            body->oa.o_id,
1658                                            body->oa.o_valid & OBD_MD_FLGROUP ?
1659                                                 body->oa.o_seq : (__u64)0,
1660                                            aa->aa_ppga[0]->off,
1661                                            aa->aa_ppga[aa->aa_page_count-1]->off +
1662                                            aa->aa_ppga[aa->aa_page_count-1]->count -
1663                                                                         1);
1664                         CERROR("client %x, server %x, cksum_type %x\n",
1665                                client_cksum, server_cksum, cksum_type);
1666                         cksum_counter = 0;
1667                         aa->aa_oa->o_cksum = client_cksum;
1668                         rc = -EAGAIN;
1669                 } else {
1670                         cksum_counter++;
1671                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1672                         rc = 0;
1673                 }
1674         } else if (unlikely(client_cksum)) {
1675                 static int cksum_missed;
1676
1677                 cksum_missed++;
1678                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1679                         CERROR("Checksum %u requested from %s but not sent\n",
1680                                cksum_missed, libcfs_nid2str(peer->nid));
1681         } else {
1682                 rc = 0;
1683         }
1684 out:
1685         if (rc >= 0)
1686                 lustre_get_wire_obdo(aa->aa_oa, &body->oa);
1687
1688         RETURN(rc);
1689 }
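
/* NB: the -EAGAIN returns above (bulk unwrap failure, write checksum
 * mismatch, or read checksum mismatch) are meant to be treated as
 * recoverable by the callers, which rebuild and resend the RPC -- see
 * osc_brw_internal() and brw_interpret() below. */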
1690
1691 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1692                             struct lov_stripe_md *lsm,
1693                             obd_count page_count, struct brw_page **pga,
1694                             struct obd_capa *ocapa)
1695 {
1696         struct ptlrpc_request *req;
1697         int                    rc;
1698         cfs_waitq_t            waitq;
1699         int                    resends = 0;
1700         struct l_wait_info     lwi;
1701
1702         ENTRY;
1703
1704         cfs_waitq_init(&waitq);
1705
1706 restart_bulk:
1707         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1708                                   page_count, pga, &req, ocapa, 0, resends);
1709         if (rc != 0)
1710                 return (rc);
1711
1712         rc = ptlrpc_queue_wait(req);
1713
1714         if (rc == -ETIMEDOUT && req->rq_resend) {
1715                 DEBUG_REQ(D_HA, req,  "BULK TIMEOUT");
1716                 ptlrpc_req_finished(req);
1717                 goto restart_bulk;
1718         }
1719
1720         rc = osc_brw_fini_request(req, rc);
1721
1722         ptlrpc_req_finished(req);
1723         if (osc_recoverable_error(rc)) {
1724                 resends++;
1725                 if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
1726                         CERROR("too many resend retries, returning error\n");
1727                         RETURN(-EIO);
1728                 }
1729
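                /* Back off linearly: sleep 'resends' seconds (interruptible)
                 * before rebuilding and resending the bulk request. */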
1730                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
1731                 l_wait_event(waitq, 0, &lwi);
1732
1733                 goto restart_bulk;
1734         }
1735
1736         RETURN (rc);
1737 }
1738
1739 int osc_brw_redo_request(struct ptlrpc_request *request,
1740                          struct osc_brw_async_args *aa)
1741 {
1742         struct ptlrpc_request *new_req;
1743         struct ptlrpc_request_set *set = request->rq_set;
1744         struct osc_brw_async_args *new_aa;
1745         struct osc_async_page *oap;
1746         int rc = 0;
1747         ENTRY;
1748
1749         if (!client_should_resend(aa->aa_resends, aa->aa_cli)) {
1750                 CERROR("too many resend retries, returning error\n");
1751                 RETURN(-EIO);
1752         }
1753
1754         DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1755
1756         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1757                                         OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1758                                   aa->aa_cli, aa->aa_oa,
1759                                   NULL /* lsm unused by osc currently */,
1760                                   aa->aa_page_count, aa->aa_ppga,
1761                                   &new_req, aa->aa_ocapa, 0, 1);
1762         if (rc)
1763                 RETURN(rc);
1764
1765         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1766
1767         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1768                 if (oap->oap_request != NULL) {
1769                         LASSERTF(request == oap->oap_request,
1770                                  "request %p != oap_request %p\n",
1771                                  request, oap->oap_request);
1772                         if (oap->oap_interrupted) {
1773                                 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1774                                 ptlrpc_req_finished(new_req);
1775                                 RETURN(-EINTR);
1776                         }
1777                 }
1778         }
1779         /* The new request takes over pga and oaps from the old request.
1780          * Note that copying a list_head doesn't work; it must be moved. */
1781         aa->aa_resends++;
1782         new_req->rq_interpret_reply = request->rq_interpret_reply;
1783         new_req->rq_async_args = request->rq_async_args;
1784         new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1785
1786         new_aa = ptlrpc_req_async_args(new_req);
1787
1788         CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1789         cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1790         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1791
1792         cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1793                 if (oap->oap_request) {
1794                         ptlrpc_req_finished(oap->oap_request);
1795                         oap->oap_request = ptlrpc_request_addref(new_req);
1796                 }
1797         }
1798
1799         new_aa->aa_ocapa = aa->aa_ocapa;
1800         aa->aa_ocapa = NULL;
1801
1802         /* Using ptlrpc_set_add_req() is safe because interpret functions
1803          * run in check_set context.  The only path by which another thread
1804          * can access this request and return -EINTR is protected by
1805          * cl_loi_list_lock. */
1806         ptlrpc_set_add_req(set, new_req);
1807
1808         client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1809
1810         DEBUG_REQ(D_INFO, new_req, "new request");
1811         RETURN(0);
1812 }
1813
1814 /*
1815  * ugh, we want disk allocation on the target to happen in offset order.
1816  * We'll follow Sedgewick's advice and stick to the dead-simple shellsort --
1817  * it'll do fine for our small page arrays and doesn't require allocation.
1818  * It's an insertion sort that swaps elements that are strides apart,
1819  * shrinking the stride down until it's 1 and the array is sorted.
1820  */
1821 static void sort_brw_pages(struct brw_page **array, int num)
1822 {
1823         int stride, i, j;
1824         struct brw_page *tmp;
1825
1826         if (num == 1)
1827                 return;
1828         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1829                 ;
1830
1831         do {
1832                 stride /= 3;
1833                 for (i = stride ; i < num ; i++) {
1834                         tmp = array[i];
1835                         j = i;
1836                         while (j >= stride && array[j - stride]->off > tmp->off) {
1837                                 array[j] = array[j - stride];
1838                                 j -= stride;
1839                         }
1840                         array[j] = tmp;
1841                 }
1842         } while (stride > 1);
1843 }
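
/* For example (traced from the code above): with num == 50 the first loop
 * leaves stride == 121 (the sequence 1, 4, 13, 40, 121), and the do/while
 * then sorts with strides 40, 13 and 4 before the final stride-1 pass,
 * which is a plain insertion sort over an almost-sorted array. */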
1844
1845 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1846 {
1847         int count = 1;
1848         int offset;
1849         int i = 0;
1850
1851         LASSERT(pages > 0);
1852         offset = pg[i]->off & ~CFS_PAGE_MASK;
1853
1854         for (;;) {
1855                 pages--;
1856                 if (pages == 0)         /* that's all */
1857                         return count;
1858
1859                 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1860                         return count;   /* doesn't end on page boundary */
1861
1862                 i++;
1863                 offset = pg[i]->off & ~CFS_PAGE_MASK;
1864                 if (offset != 0)        /* doesn't start on page boundary */
1865                         return count;
1866
1867                 count++;
1868         }
1869 }
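
/* For example (illustrative values, CFS_PAGE_SIZE == 4096): given four
 * pages {off 0, count 4096}, {4096, 4096}, {8192, 2048}, {12288, 4096},
 * this returns 3 -- the third page ends mid-page, so the fourth cannot
 * join the same unfragmented RDMA transfer. */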
1870
1871 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1872 {
1873         struct brw_page **ppga;
1874         int i;
1875
1876         OBD_ALLOC(ppga, sizeof(*ppga) * count);
1877         if (ppga == NULL)
1878                 return NULL;
1879
1880         for (i = 0; i < count; i++)
1881                 ppga[i] = pga + i;
1882         return ppga;
1883 }
1884
1885 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1886 {
1887         LASSERT(ppga != NULL);
1888         OBD_FREE(ppga, sizeof(*ppga) * count);
1889 }
1890
1891 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1892                    obd_count page_count, struct brw_page *pga,
1893                    struct obd_trans_info *oti)
1894 {
1895         struct obdo *saved_oa = NULL;
1896         struct brw_page **ppga, **orig;
1897         struct obd_import *imp = class_exp2cliimp(exp);
1898         struct client_obd *cli;
1899         int rc, page_count_orig;
1900         ENTRY;
1901
1902         LASSERT((imp != NULL) && (imp->imp_obd != NULL));
1903         cli = &imp->imp_obd->u.cli;
1904
1905         if (cmd & OBD_BRW_CHECK) {
1906                 /* The caller just wants to know if there's a chance that this
1907                  * I/O can succeed */
1908
1909                 if (imp->imp_invalid)
1910                         RETURN(-EIO);
1911                 RETURN(0);
1912         }
1913
1914         /* test_brw with a failed create can trip this, maybe others. */
1915         LASSERT(cli->cl_max_pages_per_rpc);
1916
1917         rc = 0;
1918
1919         orig = ppga = osc_build_ppga(pga, page_count);
1920         if (ppga == NULL)
1921                 RETURN(-ENOMEM);
1922         page_count_orig = page_count;
1923
1924         sort_brw_pages(ppga, page_count);
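        /* Illustrative numbers (not from the source): with
         * cl_max_pages_per_rpc == 256 and 600 sorted pages, the loop below
         * issues RPCs of at most 256 pages each, possibly trimmed further by
         * max_unfragmented_pages() so a single bulk never carries an
         * interior gap. */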
1925         while (page_count) {
1926                 obd_count pages_per_brw;
1927
1928                 if (page_count > cli->cl_max_pages_per_rpc)
1929                         pages_per_brw = cli->cl_max_pages_per_rpc;
1930                 else
1931                         pages_per_brw = page_count;
1932
1933                 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1934
1935                 if (saved_oa != NULL) {
1936                         /* restore previously saved oa */
1937                         *oinfo->oi_oa = *saved_oa;
1938                 } else if (page_count > pages_per_brw) {
1939                         /* save a copy of oa (brw will clobber it) */
1940                         OBDO_ALLOC(saved_oa);
1941                         if (saved_oa == NULL)
1942                                 GOTO(out, rc = -ENOMEM);
1943                         *saved_oa = *oinfo->oi_oa;
1944                 }
1945
1946                 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1947                                       pages_per_brw, ppga, oinfo->oi_capa);
1948
1949                 if (rc != 0)
1950                         break;
1951
1952                 page_count -= pages_per_brw;
1953                 ppga += pages_per_brw;
1954         }
1955
1956 out:
1957         osc_release_ppga(orig, page_count_orig);
1958
1959         if (saved_oa != NULL)
1960                 OBDO_FREE(saved_oa);
1961
1962         RETURN(rc);
1963 }
1964
1965 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1966  * the dirty accounting: writeback completed, or a truncate happened before
1967  * writing started.  Must be called with the loi lock held. */
1968 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1969                            int sent)
1970 {
1971         osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1972 }
1973
1974
1975 /* This decides whether the pending pages (lop) of a given object warrant an
1976  * RPC.  It is used by osc_check_rpcs()->osc_next_loi() and loi_list_maint()
1977  * to quickly find objects that are ready to send an RPC. */
1978 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1979                          int cmd)
1980 {
1981         ENTRY;
1982
1983         if (lop->lop_num_pending == 0)
1984                 RETURN(0);
1985
1986         /* if we have an invalid import we want to drain the queued pages
1987          * by forcing them through rpcs that immediately fail and complete
1988          * the pages.  recovery relies on this to empty the queued pages
1989          * before canceling the locks and evicting down the llite pages */
1990         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1991                 RETURN(1);
1992
1993         /* stream rpcs in queue order as long as there is an urgent page
1994          * queued.  this is our cheap solution for good batching in the case
1995          * where writepage marks some random page in the middle of the file
1996          * as urgent because of, say, memory pressure */
1997         if (!cfs_list_empty(&lop->lop_urgent)) {
1998                 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
1999                 RETURN(1);
2000         }
2001
2002         if (cmd & OBD_BRW_WRITE) {
2003                 /* trigger a write rpc stream as long as there are dirtiers
2004                  * waiting for space.  as they're waiting, they're not going to
2005                  * create more pages to coalesce with what's waiting. */
2006                 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
2007                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
2008                         RETURN(1);
2009                 }
2010         }
2011         if (lop->lop_num_pending >= cli->cl_max_pages_per_rpc)
2012                 RETURN(1);
2013
2014         RETURN(0);
2015 }
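
/* In short: an RPC is warranted when the import is invalid (drain the
 * queue), an urgent page is queued, cache waiters need space freed (for
 * writes), or a full RPC's worth of pages (cl_max_pages_per_rpc) is
 * pending. */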
2016
2017 static int lop_makes_hprpc(struct loi_oap_pages *lop)
2018 {
2019         struct osc_async_page *oap;
2020         ENTRY;
2021
2022         if (cfs_list_empty(&lop->lop_urgent))
2023                 RETURN(0);
2024
2025         oap = cfs_list_entry(lop->lop_urgent.next,
2026                          struct osc_async_page, oap_urgent_item);
2027
2028         if (oap->oap_async_flags & ASYNC_HP) {
2029                 CDEBUG(D_CACHE, "hp request forcing RPC\n");
2030                 RETURN(1);
2031         }
2032
2033         RETURN(0);
2034 }
2035
2036 static void on_list(cfs_list_t *item, cfs_list_t *list,
2037                     int should_be_on)
2038 {
2039         if (cfs_list_empty(item) && should_be_on)
2040                 cfs_list_add_tail(item, list);
2041         else if (!cfs_list_empty(item) && !should_be_on)
2042                 cfs_list_del_init(item);
2043 }
2044
2045 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
2046  * can find pages to build into rpcs quickly */
2047 void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
2048 {
2049         if (lop_makes_hprpc(&loi->loi_write_lop) ||
2050             lop_makes_hprpc(&loi->loi_read_lop)) {
2051                 /* HP rpc */
2052                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
2053                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
2054         } else {
2055                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
2056                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
2057                         lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
2058                         lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
2059         }
2060
2061         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
2062                 loi->loi_write_lop.lop_num_pending);
2063
2064         on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
2065                 loi->loi_read_lop.lop_num_pending);
2066 }
2067
2068 static void lop_update_pending(struct client_obd *cli,
2069                                struct loi_oap_pages *lop, int cmd, int delta)
2070 {
2071         lop->lop_num_pending += delta;
2072         if (cmd & OBD_BRW_WRITE)
2073                 cli->cl_pending_w_pages += delta;
2074         else
2075                 cli->cl_pending_r_pages += delta;
2076 }
2077
2078 /**
2079  * This is called when a sync waiter receives an interruption.  Its job is to
2080  * get the caller woken as soon as possible.  If its page hasn't been put in an
2081  * rpc yet, it can dequeue immediately.  Otherwise it has to mark the rpc as
2082  * desiring interruption, which will forcefully complete the rpc once the rpc
2083  * has timed out.
2084  */
2085 int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
2086 {
2087         struct loi_oap_pages *lop;
2088         struct lov_oinfo *loi;
2089         int rc = -EBUSY;
2090         ENTRY;
2091
2092         LASSERT(!oap->oap_interrupted);
2093         oap->oap_interrupted = 1;
2094
2095         /* ok, it's been put in an rpc. only one oap gets a request reference */
2096         if (oap->oap_request != NULL) {
2097                 ptlrpc_mark_interrupted(oap->oap_request);
2098                 ptlrpcd_wake(oap->oap_request);
2099                 ptlrpc_req_finished(oap->oap_request);
2100                 oap->oap_request = NULL;
2101         }
2102
2103         /*
2104          * page completion may be called only if the ->cpo_prep() method was
2105          * executed by osc_io_submit(), which also adds the page to the pending list
2106          */
2107         if (!cfs_list_empty(&oap->oap_pending_item)) {
2108                 cfs_list_del_init(&oap->oap_pending_item);
2109                 cfs_list_del_init(&oap->oap_urgent_item);
2110
2111                 loi = oap->oap_loi;
2112                 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
2113                         &loi->loi_write_lop : &loi->loi_read_lop;
2114                 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
2115                 loi_list_maint(oap->oap_cli, oap->oap_loi);
2116                 rc = oap->oap_caller_ops->ap_completion(env,
2117                                           oap->oap_caller_data,
2118                                           oap->oap_cmd, NULL, -EINTR);
2119         }
2120
2121         RETURN(rc);
2122 }
2123
2124 /* This tries to propagate async writeback errors back up to the
2125  * application.  When an async write fails we record the error code for
2126  * later if the app does an fsync.  As long as errors persist we force
2127  * future rpcs to be sync so that the app can get a sync error and break
2128  * the cycle of queueing pages for which writeback will fail. */
2129 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
2130                            int rc)
2131 {
2132         if (rc) {
2133                 if (!ar->ar_rc)
2134                         ar->ar_rc = rc;
2135
2136                 ar->ar_force_sync = 1;
2137                 ar->ar_min_xid = ptlrpc_sample_next_xid();
2138                 return;
2139
2140         }
2141
2142         if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
2143                 ar->ar_force_sync = 0;
2144 }
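
/* NB: ar_min_xid is sampled at failure time, so only a successful reply for
 * a request issued after the failure (xid >= ar_min_xid) clears
 * ar_force_sync; older in-flight writes cannot end the sync window early. */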
2145
2146 void osc_oap_to_pending(struct osc_async_page *oap)
2147 {
2148         struct loi_oap_pages *lop;
2149
2150         if (oap->oap_cmd & OBD_BRW_WRITE)
2151                 lop = &oap->oap_loi->loi_write_lop;
2152         else
2153                 lop = &oap->oap_loi->loi_read_lop;
2154
2155         if (oap->oap_async_flags & ASYNC_HP)
2156                 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2157         else if (oap->oap_async_flags & ASYNC_URGENT)
2158                 cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
2159         cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2160         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
2161 }
2162
2163 /* This must be called holding the loi list lock, which covers exit_cache,
2164  * async_flag maintenance, and oap_request. */
2165 static void osc_ap_completion(const struct lu_env *env,
2166                               struct client_obd *cli, struct obdo *oa,
2167                               struct osc_async_page *oap, int sent, int rc)
2168 {
2169         __u64 xid = 0;
2170
2171         ENTRY;
2172         if (oap->oap_request != NULL) {
2173                 xid = ptlrpc_req_xid(oap->oap_request);
2174                 ptlrpc_req_finished(oap->oap_request);
2175                 oap->oap_request = NULL;
2176         }
2177
2178         cfs_spin_lock(&oap->oap_lock);
2179         oap->oap_async_flags = 0;
2180         cfs_spin_unlock(&oap->oap_lock);
2181         oap->oap_interrupted = 0;
2182
2183         if (oap->oap_cmd & OBD_BRW_WRITE) {
2184                 osc_process_ar(&cli->cl_ar, xid, rc);
2185                 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
2186         }
2187
2188         if (rc == 0 && oa != NULL) {
2189                 if (oa->o_valid & OBD_MD_FLBLOCKS)
2190                         oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2191                 if (oa->o_valid & OBD_MD_FLMTIME)
2192                         oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2193                 if (oa->o_valid & OBD_MD_FLATIME)
2194                         oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2195                 if (oa->o_valid & OBD_MD_FLCTIME)
2196                         oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2197         }
2198
2199         rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
2200                                                 oap->oap_cmd, oa, rc);
2201
2202         /* cl_page_completion() drops PG_locked, so a new I/O on the page could
2203          * start; but OSC calls it under lock, and thus we can safely add the oap
2204          * back to the pending list */
2205         if (rc)
2206                 /* upper layer wants to leave the page on pending queue */
2207                 osc_oap_to_pending(oap);
2208         else
2209                 osc_exit_cache(cli, oap, sent);
2210         EXIT;
2211 }
2212
2213 static int brw_queue_work(const struct lu_env *env, void *data)
2214 {
2215         struct client_obd *cli = data;
2216
2217         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
2218
2219         client_obd_list_lock(&cli->cl_loi_list_lock);
2220         osc_check_rpcs0(env, cli, 1);
2221         client_obd_list_unlock(&cli->cl_loi_list_lock);
2222         RETURN(0);
2223 }
2224
2225 static int brw_interpret(const struct lu_env *env,
2226                          struct ptlrpc_request *req, void *data, int rc)
2227 {
2228         struct osc_brw_async_args *aa = data;
2229         struct client_obd *cli;
2230         int async;
2231         ENTRY;
2232
2233         rc = osc_brw_fini_request(req, rc);
2234         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2235         if (osc_recoverable_error(rc)) {
2236                 rc = osc_brw_redo_request(req, aa);
2237                 if (rc == 0)
2238                         RETURN(0);
2239         }
2240
2241         if (aa->aa_ocapa) {
2242                 capa_put(aa->aa_ocapa);
2243                 aa->aa_ocapa = NULL;
2244         }
2245
2246         cli = aa->aa_cli;
2247         client_obd_list_lock(&cli->cl_loi_list_lock);
2248
2249         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2250          * is called so we know whether to go to sync BRWs or wait for more
2251          * RPCs to complete */
2252         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2253                 cli->cl_w_in_flight--;
2254         else
2255                 cli->cl_r_in_flight--;
2256
2257         async = cfs_list_empty(&aa->aa_oaps);
2258         if (!async) { /* from osc_send_oap_rpc() */
2259                 struct osc_async_page *oap, *tmp;
2260                 /* the caller may re-use the oap after the completion call so
2261                  * we need to clean it up a little */
2262                 cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
2263                                              oap_rpc_item) {
2264                         cfs_list_del_init(&oap->oap_rpc_item);
2265                         osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
2266                 }
2267                 OBDO_FREE(aa->aa_oa);
2268         } else { /* from async_internal() */
2269                 obd_count i;
2270                 for (i = 0; i < aa->aa_page_count; i++)
2271                         osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2272         }
2273         osc_wake_cache_waiters(cli);
2274         osc_check_rpcs0(env, cli, 1);
2275         client_obd_list_unlock(&cli->cl_loi_list_lock);
2276
2277         if (!async)
2278                 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
2279                                   req->rq_bulk->bd_nob_transferred);
2280         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2281         ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
2282
2283         RETURN(rc);
2284 }
2285
2286 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
2287                                             struct client_obd *cli,
2288                                             cfs_list_t *rpc_list,
2289                                             int page_count, int cmd)
2290 {
2291         struct ptlrpc_request *req;
2292         struct brw_page **pga = NULL;
2293         struct osc_brw_async_args *aa;
2294         struct obdo *oa = NULL;
2295         const struct obd_async_page_ops *ops = NULL;
2296         struct osc_async_page *oap;
2297         struct osc_async_page *tmp;
2298         struct cl_req *clerq = NULL;
2299         enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2300         struct ldlm_lock *lock = NULL;
2301         struct cl_req_attr crattr;
2302         int i, rc, mpflag = 0;
2303
2304         ENTRY;
2305         LASSERT(!cfs_list_empty(rpc_list));
2306
2307         if (cmd & OBD_BRW_MEMALLOC)
2308                 mpflag = cfs_memory_pressure_get_and_set();
2309
2310         memset(&crattr, 0, sizeof crattr);
2311         OBD_ALLOC(pga, sizeof(*pga) * page_count);
2312         if (pga == NULL)
2313                 GOTO(out, req = ERR_PTR(-ENOMEM));
2314
2315         OBDO_ALLOC(oa);
2316         if (oa == NULL)
2317                 GOTO(out, req = ERR_PTR(-ENOMEM));
2318
2319         i = 0;
2320         cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2321                 struct cl_page *page = osc_oap2cl_page(oap);
2322                 if (ops == NULL) {
2323                         ops = oap->oap_caller_ops;
2324
2325                         clerq = cl_req_alloc(env, page, crt,
2326                                              1 /* only 1-object rpcs for
2327                                                 * now */);
2328                         if (IS_ERR(clerq))
2329                                 GOTO(out, req = (void *)clerq);
2330                         lock = oap->oap_ldlm_lock;
2331                 }
2332                 pga[i] = &oap->oap_brw_page;
2333                 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2334                 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2335                        pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2336                 i++;
2337                 cl_req_page_add(env, clerq, page);
2338         }
2339
2340         /* always get the data for the obdo for the rpc */
2341         LASSERT(ops != NULL);
2342         crattr.cra_oa = oa;
2343         crattr.cra_capa = NULL;
2344         cl_req_attr_set(env, clerq, &crattr, ~0ULL);
2345         if (lock) {
2346                 oa->o_handle = lock->l_remote_handle;
2347                 oa->o_valid |= OBD_MD_FLHANDLE;
2348         }
2349
2350         rc = cl_req_prep(env, clerq);
2351         if (rc != 0) {
2352                 CERROR("cl_req_prep failed: %d\n", rc);
2353                 GOTO(out, req = ERR_PTR(rc));
2354         }
2355
2356         sort_brw_pages(pga, page_count);
2357         rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2358                                   pga, &req, crattr.cra_capa, 1, 0);
2359         if (rc != 0) {
2360                 CERROR("prep_req failed: %d\n", rc);
2361                 GOTO(out, req = ERR_PTR(rc));
2362         }
2363
2364         if (cmd & OBD_BRW_MEMALLOC)
2365                 req->rq_memalloc = 1;
2366
2367         /* Need to update the timestamps after the request is built in case
2368          * we race with setattr (locally or in queue at OST).  If OST gets
2369          * later setattr before earlier BRW (as determined by the request xid),
2370          * the OST will not use BRW timestamps.  Sadly, there is no obvious
2371          * way to do this in a single call.  bug 10150 */
2372         cl_req_attr_set(env, clerq, &crattr,
2373                         OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2374
2375         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2376         aa = ptlrpc_req_async_args(req);
2377         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2378         cfs_list_splice(rpc_list, &aa->aa_oaps);
2379         CFS_INIT_LIST_HEAD(rpc_list);
2380         aa->aa_clerq = clerq;
2381 out:
2382         if (cmd & OBD_BRW_MEMALLOC)
2383                 cfs_memory_pressure_restore(mpflag);
2384
2385         capa_put(crattr.cra_capa);
2386         if (IS_ERR(req)) {
2387                 if (oa)
2388                         OBDO_FREE(oa);
2389                 if (pga)
2390                         OBD_FREE(pga, sizeof(*pga) * page_count);
2391                 /* This should happen rarely and is pretty bad: it makes the
2392                  * pending list not follow the dirty order */
2393                 client_obd_list_lock(&cli->cl_loi_list_lock);
2394                 cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
2395                         cfs_list_del_init(&oap->oap_rpc_item);
2396
2397                         /* queued sync pages can be torn down while the pages
2398                          * were between the pending list and the rpc */
2399                         if (oap->oap_interrupted) {
2400                                 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2401                                 osc_ap_completion(env, cli, NULL, oap, 0,
2402                                                   oap->oap_count);
2403                                 continue;
2404                         }
2405                         osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
2406                 }
2407                 if (clerq && !IS_ERR(clerq))
2408                         cl_req_completion(env, clerq, PTR_ERR(req));
2409         }
2410         RETURN(req);
2411 }
2412
2413 /**
2414  * Prepare pages for ASYNC I/O and put them in the send queue.
2415  *
2416  * \param cmd OBD_BRW_* flags
2417  * \param lop pending pages
2418  *
2419  * \return zero if no pages were added to the send queue.
2420  * \return 1 if pages were successfully added to the send queue.
2421  * \return negative on errors.
2422  */
2423 static int
2424 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
2425                  struct lov_oinfo *loi, int cmd,
2426                  struct loi_oap_pages *lop, pdl_policy_t pol)
2427 {
2428         struct ptlrpc_request *req;
2429         obd_count page_count = 0;
2430         struct osc_async_page *oap = NULL, *tmp;
2431         struct osc_brw_async_args *aa;
2432         const struct obd_async_page_ops *ops;
2433         CFS_LIST_HEAD(rpc_list);
2434         int srvlock = 0, mem_tight = 0;
2435         struct cl_object *clob = NULL;
2436         obd_off starting_offset = OBD_OBJECT_EOF;
2437         unsigned int ending_offset;
2438         int starting_page_off = 0;
2439         ENTRY;
2440
2441         /* ASYNC_HP pages first.  At present, when the lock covering the pages
2442          * is to be canceled, the pages under it will be sent out with ASYNC_HP.
2443          * We have to send them out as soon as possible. */
2444         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
2445                 if (oap->oap_async_flags & ASYNC_HP)
2446                         cfs_list_move(&oap->oap_pending_item, &rpc_list);
2447                 else if (!(oap->oap_brw_flags & OBD_BRW_SYNC))
2448                         /* only do this for writeback pages. */
2449                         cfs_list_move_tail(&oap->oap_pending_item, &rpc_list);
2450                 if (++page_count >= cli->cl_max_pages_per_rpc)
2451                         break;
2452         }
2453         cfs_list_splice_init(&rpc_list, &lop->lop_pending);
2454         page_count = 0;
2455
2456         /* first we find the pages we're allowed to work with */
2457         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2458                                      oap_pending_item) {
2459                 ops = oap->oap_caller_ops;
2460
2461                 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
2462                          "magic 0x%x\n", oap, oap->oap_magic);
2463
2464                 if (clob == NULL) {
2465                         /* pin object in memory, so that completion call-backs
2466                          * can be safely called under client_obd_list lock. */
2467                         clob = osc_oap2cl_page(oap)->cp_obj;
2468                         cl_object_get(clob);
2469                 }
2470
2471                 if (page_count != 0 &&
2472                     srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2473                         CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2474                                " oap %p, page %p, srvlock %u\n",
2475                                oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2476                         break;
2477                 }
2478
2479                 /* If there is a gap at the start of this page, it can't merge
2480                  * with any previous page, so we'll hand the network a
2481                  * "fragmented" page array that it can't transfer in 1 RDMA */
2482                 if (oap->oap_obj_off < starting_offset) {
2483                         if (starting_page_off != 0)
2484                                 break;
2485
2486                         starting_page_off = oap->oap_page_off;
2487                         starting_offset = oap->oap_obj_off + starting_page_off;
2488                 } else if (oap->oap_page_off != 0)
2489                         break;
2490
2491                 /* in llite being 'ready' equates to the page being locked
2492                  * until completion unlocks it.  commit_write submits a page
2493                  * as not ready because its unlock will happen unconditionally
2494                  * as the call returns.  if we race with commit_write giving
2495                  * us that page we don't want to create a hole in the page
2496                  * stream, so we stop and leave the rpc to be fired by
2497                  * another dirtier or kupdated interval (the not ready page
2498                  * will still be on the dirty list).  we could call in
2499                  * at the end of ll_file_write to process the queue again. */
2500                 if (!(oap->oap_async_flags & ASYNC_READY)) {
2501                         int rc = ops->ap_make_ready(env, oap->oap_caller_data,
2502                                                     cmd);
2503                         if (rc < 0)
2504                                 CDEBUG(D_INODE, "oap %p page %p returned %d "
2505                                                 "instead of ready\n", oap,
2506                                                 oap->oap_page, rc);
2507                         switch (rc) {
2508                         case -EAGAIN:
2509                                 /* llite is telling us that the page is still
2510                                  * in commit_write and that we should try
2511                                  * and put it in an rpc again later.  we
2512                                  * break out of the loop so we don't create
2513                                  * a hole in the sequence of pages in the rpc
2514                                  * stream.*/
2515                                 oap = NULL;
2516                                 break;
2517                         case -EINTR:
2518                                 /* the io isn't needed.. tell the checks
2519                                  * below to complete the rpc with EINTR */
2520                                 cfs_spin_lock(&oap->oap_lock);
2521                                 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2522                                 cfs_spin_unlock(&oap->oap_lock);
2523                                 oap->oap_count = -EINTR;
2524                                 break;
2525                         case 0:
2526                                 cfs_spin_lock(&oap->oap_lock);
2527                                 oap->oap_async_flags |= ASYNC_READY;
2528                                 cfs_spin_unlock(&oap->oap_lock);
2529                                 break;
2530                         default:
2531                                 LASSERTF(0, "oap %p page %p returned %d "
2532                                             "from make_ready\n", oap,
2533                                             oap->oap_page, rc);
2534                                 break;
2535                         }
2536                 }
2537                 if (oap == NULL)
2538                         break;
2539
2540                 /* take the page out of our book-keeping */
2541                 cfs_list_del_init(&oap->oap_pending_item);
2542                 lop_update_pending(cli, lop, cmd, -1);
2543                 cfs_list_del_init(&oap->oap_urgent_item);
2544
2545                 /* ask the caller for the size of the io as the rpc leaves. */
2546                 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
2547                         oap->oap_count =
2548                                 ops->ap_refresh_count(env, oap->oap_caller_data,
2549                                                       cmd);
2550                         LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
2551                 }
2552                 if (oap->oap_count <= 0) {
2553                         CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2554                                oap->oap_count);
2555                         osc_ap_completion(env, cli, NULL,
2556                                           oap, 0, oap->oap_count);
2557                         continue;
2558                 }
2559
2560                 /* now put the page back in our accounting */
2561                 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
2562                 if (page_count++ == 0)
2563                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2564
2565                 if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
2566                         mem_tight = 1;
2567
2568                 /* End on a PTLRPC_MAX_BRW_SIZE boundary.  We want full-sized
2569                  * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2570                  * have the same alignment as the initial writes that allocated
2571                  * extents on the server. */
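                /* e.g. (illustrative): with a 1MB PTLRPC_MAX_BRW_SIZE, an
                 * oap ending exactly on a 1MB boundary closes this RPC here,
                 * so the next RPC starts 1MB-aligned. */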
2572                 ending_offset = oap->oap_obj_off + oap->oap_page_off +
2573                                 oap->oap_count;
2574                 if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
2575                         break;
2576
2577                 if (page_count >= cli->cl_max_pages_per_rpc)
2578                         break;
2579
2580                 /* If there is a gap at the end of this page, it can't merge
2581                  * with any subsequent pages, so we'll hand the network a
2582                  * "fragmented" page array that it can't transfer in 1 RDMA */
2583                 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2584                         break;
2585         }
2586
2587         loi_list_maint(cli, loi);
2588
2589         client_obd_list_unlock(&cli->cl_loi_list_lock);
2590
2591         if (clob != NULL)
2592                 cl_object_put(env, clob);
2593
2594         if (page_count == 0) {
2595                 client_obd_list_lock(&cli->cl_loi_list_lock);
2596                 RETURN(0);
2597         }
2598
2599         req = osc_build_req(env, cli, &rpc_list, page_count,
2600                             mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
2601         if (IS_ERR(req)) {
2602                 LASSERT(cfs_list_empty(&rpc_list));
2603                 loi_list_maint(cli, loi);
2604                 RETURN(PTR_ERR(req));
2605         }
2606
2607         aa = ptlrpc_req_async_args(req);
2608
2609         starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
2610         if (cmd == OBD_BRW_READ) {
2611                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2612                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2613                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2614                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2615         } else {
2616                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2617                 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2618                                  cli->cl_w_in_flight);
2619                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2620                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2621         }
2622
2623         client_obd_list_lock(&cli->cl_loi_list_lock);
2624
2625         if (cmd == OBD_BRW_READ)
2626                 cli->cl_r_in_flight++;
2627         else
2628                 cli->cl_w_in_flight++;
2629
2630         /* queued sync pages can be torn down while the pages
2631          * were between the pending list and the rpc */
2632         tmp = NULL;
2633         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2634                 /* only one oap gets a request reference */
2635                 if (tmp == NULL)
2636                         tmp = oap;
2637                 if (oap->oap_interrupted && !req->rq_intr) {
2638                         CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2639                                oap, req);
2640                         ptlrpc_mark_interrupted(req);
2641                 }
2642         }
2643         if (tmp != NULL)
2644                 tmp->oap_request = ptlrpc_request_addref(req);
2645
2646         DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2647                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2648
2649         req->rq_interpret_reply = brw_interpret;
2650
2651         /* XXX: Maybe the caller can check the RPC bulk descriptor to see which
2652          *      CPU/NUMA node the majority of pages were allocated on, and try
2653          *      to assign the async RPC to the CPU core (PDL_POLICY_PREFERRED)
2654          *      to reduce cross-CPU memory traffic.
2655          *
2656          *      But on the other hand, we expect that multiple ptlrpcd threads
2657          *      and the initial write sponsor can run in parallel, especially
2658          *      when data checksumming is enabled, a CPU-bound operation that
2659          *      a single ptlrpcd thread cannot process in time. So more ptlrpcd
2660          *      threads sharing BRW load (with PDL_POLICY_ROUND) seems better.
2661          */
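             /* A hypothetical sketch of the PDL_POLICY_PREFERRED idea above,
              * for illustration only, assuming the first brw_page is
              * representative of where the pages live:
              *
              *      ptlrpcd_add_req(req, PDL_POLICY_PREFERRED,
              *                      page_to_nid(aa->aa_ppga[0]->pg));
              */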
2662         ptlrpcd_add_req(req, pol, -1);
2663         RETURN(1);
2664 }
2665
2666 #define LOI_DEBUG(LOI, STR, args...)                                     \
2667         CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,           \
2668                !cfs_list_empty(&(LOI)->loi_ready_item) ||                \
2669                !cfs_list_empty(&(LOI)->loi_hp_ready_item),               \
2670                (LOI)->loi_write_lop.lop_num_pending,                     \
2671                !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent),        \
2672                (LOI)->loi_read_lop.lop_num_pending,                      \
2673                !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent),         \
2674                args)
2675
2676 /* This is called by osc_check_rpcs() to find which objects have pages that
2677  * we could be sending.  These lists are maintained by lop_makes_rpc(). */
2678 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2679 {
2680         ENTRY;
2681
2682         /* First return objects that have blocked locks so that they
2683          * will be flushed quickly and other clients can get the lock,
2684          * then objects which have pages ready to be stuffed into RPCs */
2685         if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
2686                 RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
2687                                       struct lov_oinfo, loi_hp_ready_item));
2688         if (!cfs_list_empty(&cli->cl_loi_ready_list))
2689                 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
2690                                       struct lov_oinfo, loi_ready_item));
2691
2692         /* then if we have cache waiters, return all objects with queued
2693          * writes.  This is especially important when many small files
2694          * have filled up the cache and not been fired into rpcs because
2695          * they don't pass the nr_pending/object threshold */
2696         if (!cfs_list_empty(&cli->cl_cache_waiters) &&
2697             !cfs_list_empty(&cli->cl_loi_write_list))
2698                 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2699                                       struct lov_oinfo, loi_write_item));
2700
2701         /* then return all queued objects when we have an invalid import
2702          * so that they get flushed */
2703         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2704                 if (!cfs_list_empty(&cli->cl_loi_write_list))
2705                         RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2706                                               struct lov_oinfo,
2707                                               loi_write_item));
2708                 if (!cfs_list_empty(&cli->cl_loi_read_list))
2709                         RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
2710                                               struct lov_oinfo, loi_read_item));
2711         }
2712         RETURN(NULL);
2713 }
2714
2715 static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
2716 {
2717         struct osc_async_page *oap;
2718         int hprpc = 0;
2719
2720         if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
2721                 oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
2722                                      struct osc_async_page, oap_urgent_item);
2723                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2724         }
2725
2726         if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
2727                 oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
2728                                      struct osc_async_page, oap_urgent_item);
2729                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2730         }
2731
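             /* a queued high-priority page allows one extra RPC in flight
              * beyond cl_max_rpcs_in_flight */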
2732         return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
2733 }
2734
2735 /* called with the loi list lock held */
2736 static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli, int ptlrpc)
2737 {
2738         struct lov_oinfo *loi;
2739         int rc = 0, race_counter = 0;
2740         pdl_policy_t pol;
2741         ENTRY;
2742
2743         pol = ptlrpc ? PDL_POLICY_SAME : PDL_POLICY_ROUND;
2744
2745         while ((loi = osc_next_loi(cli)) != NULL) {
2746                 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2747
2748                 if (osc_max_rpc_in_flight(cli, loi))
2749                         break;
2750
2751                 /* attempt some read/write balancing by alternating between
2752                  * reads and writes in an object.  The lop_makes_rpc() checks
2753                  * here would be redundant if we were getting read/write work
2754                  * items instead of objects.  We don't want send_oap_rpc to
2755                  * drain a partial read pending queue when we're given this
2756                  * object to do I/O on for writes while there are cache waiters */
2757                 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2758                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
2759                                               &loi->loi_write_lop, pol);
2760                         if (rc < 0) {
2761                                 CERROR("Write request failed with %d\n", rc);
2762
2763                                 /* osc_send_oap_rpc failed, mostly because of
2764                                  * memory pressure.
2765                                  *
2766                                 * We must not break here, because if:
2767                                 *  - a page was submitted by osc_io_submit,
2768                                 *    so the page is locked;
2769                                 *  - no request is in flight; and
2770                                 *  - no subsequent request will be issued,
2771                                 * then the system would live-lock: there
2772                                 * would be no further chance to call
2773                                 * osc_io_unplug() or osc_check_rpcs().
2774                                 * pdflush can't help either, because it
2775                                 * might be blocked grabbing the page lock,
2776                                 * as we mentioned above.
2777                                  *
2778                                  * Anyway, continue to drain pages. */
2779                                 /* break; */
2780                         }
2781
2782                         if (rc > 0)
2783                                 race_counter = 0;
2784                         else if (rc == 0)
2785                                 race_counter++;
2786                 }
2787                 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2788                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
2789                                               &loi->loi_read_lop, pol);
2790                         if (rc < 0)
2791                                 CERROR("Read request failed with %d\n", rc);
2792
2793                         if (rc > 0)
2794                                 race_counter = 0;
2795                         else if (rc == 0)
2796                                 race_counter++;
2797                 }
2798
2799                 /* attempt some inter-object balancing by issuing rpcs
2800                  * for each object in turn */
2801                 if (!cfs_list_empty(&loi->loi_hp_ready_item))
2802                         cfs_list_del_init(&loi->loi_hp_ready_item);
2803                 if (!cfs_list_empty(&loi->loi_ready_item))
2804                         cfs_list_del_init(&loi->loi_ready_item);
2805                 if (!cfs_list_empty(&loi->loi_write_item))
2806                         cfs_list_del_init(&loi->loi_write_item);
2807                 if (!cfs_list_empty(&loi->loi_read_item))
2808                         cfs_list_del_init(&loi->loi_read_item);
2809
2810                 loi_list_maint(cli, loi);
2811
2812                 /* send_oap_rpc fails with 0 when make_ready tells it to
2813                  * back off.  llite's make_ready does this when it tries
2814                  * to lock a page queued for write that is already locked.
2815                  * we want to try sending rpcs from many objects, but we
2816                  * don't want to spin failing with 0.  */
2817                 if (race_counter == 10)
2818                         break;
2819         }
2820 }
2821
2822 void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2823 {
2824         osc_check_rpcs0(env, cli, 0);
2825 }
2826
2827 /**
2828  * Non-blocking version of osc_enter_cache() that consumes grant only when it
2829  * is available.
2830  */
2831 int osc_enter_cache_try(const struct lu_env *env,
2832                         struct client_obd *cli, struct lov_oinfo *loi,
2833                         struct osc_async_page *oap, int transient)
2834 {
2835         int has_grant;
2836
2837         has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
2838         if (has_grant) {
2839                 osc_consume_write_grant(cli, &oap->oap_brw_page);
2840                 if (transient) {
2841                         cli->cl_dirty_transit += CFS_PAGE_SIZE;
2842                         cfs_atomic_inc(&obd_dirty_transit_pages);
2843                         oap->oap_brw_flags |= OBD_BRW_NOCACHE;
2844                 }
2845         }
2846         return has_grant;
2847 }
2848
2849 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2850  * grant or cache space. */
2851 static int osc_enter_cache(const struct lu_env *env,
2852                            struct client_obd *cli, struct lov_oinfo *loi,
2853                            struct osc_async_page *oap)
2854 {
2855         struct osc_cache_waiter ocw;
2856         struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
2857         int rc = -EDQUOT;
2858         ENTRY;
2859
2860         CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2861                "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
2862                cli->cl_dirty_max, obd_max_dirty_pages,
2863                cli->cl_lost_grant, cli->cl_avail_grant);
2864
2865         /* force the caller to try sync io.  this can jump the list
2866          * of queued writes and create a discontiguous rpc stream */
2867         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
2868             cli->cl_dirty_max < CFS_PAGE_SIZE     ||
2869             cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
2870                 RETURN(-EDQUOT);
2871
2872         /* Hopefully normal case - cache space and write credits available */
2873         if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
2874             cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
2875             osc_enter_cache_try(env, cli, loi, oap, 0))
2876                 RETURN(0);
2877
2878         /* We can get here for two reasons: too many dirty pages in cache, or
2879          * run out of grants. In both cases we should write dirty pages out.
2880          * Adding a cache waiter will trigger urgent write-out no matter what
2881          * RPC size will be.
2882          * The exit condition is no available grant and no dirty pages
2883          * cached, which really means there is no space left on the OST. */
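             /* The waiter is queued on cl_cache_waiters below; when grant or
              * cache space becomes available, osc_wake_cache_waiters() is
              * expected to remove ocw_entry from the list and fill in ocw_rc,
              * which is the wakeup condition tested in the loop. */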
2884         cfs_waitq_init(&ocw.ocw_waitq);
2885         ocw.ocw_oap = oap;
2886         while (cli->cl_dirty > 0) {
2887                 cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2888                 ocw.ocw_rc = 0;
2889
2890                 loi_list_maint(cli, loi);
2891                 osc_check_rpcs(env, cli);
2892                 client_obd_list_unlock(&cli->cl_loi_list_lock);
2893
2894                 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
2895                        cli->cl_import->imp_obd->obd_name, &ocw, oap);
2896
2897                 rc = l_wait_event(ocw.ocw_waitq, cfs_list_empty(&ocw.ocw_entry), &lwi);
2898
2899                 client_obd_list_lock(&cli->cl_loi_list_lock);
2900                 cfs_list_del_init(&ocw.ocw_entry);
2901                 if (rc < 0)
2902                         break;
2903
2904                 rc = ocw.ocw_rc;
2905                 if (rc != -EDQUOT)
2906                         break;
2907         }
2908
2909         RETURN(rc);
2910 }
2911
2912
2913 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2914                         struct lov_oinfo *loi, cfs_page_t *page,
2915                         obd_off offset, const struct obd_async_page_ops *ops,
2916                         void *data, void **res, int nocache,
2917                         struct lustre_handle *lockh)
2918 {
2919         struct osc_async_page *oap;
2920
2921         ENTRY;
2922
2923         if (!page)
2924                 return cfs_size_round(sizeof(*oap));
2925
2926         oap = *res;
2927         oap->oap_magic = OAP_MAGIC;
2928         oap->oap_cli = &exp->exp_obd->u.cli;
2929         oap->oap_loi = loi;
2930
2931         oap->oap_caller_ops = ops;
2932         oap->oap_caller_data = data;
2933
2934         oap->oap_page = page;
2935         oap->oap_obj_off = offset;
2936         if (!client_is_remote(exp) &&
2937             cfs_capable(CFS_CAP_SYS_RESOURCE))
2938                 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2939
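             /* the object offset handed to us must be page-aligned */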
2940         LASSERT(!(offset & ~CFS_PAGE_MASK));
2941
2942         CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2943         CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2944         CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2945         CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2946
2947         cfs_spin_lock_init(&oap->oap_lock);
2948         CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2949         RETURN(0);
2950 }
2951
2952 int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
2953                        struct lov_stripe_md *lsm, struct lov_oinfo *loi,
2954                        struct osc_async_page *oap, int cmd, int off,
2955                        int count, obd_flag brw_flags, enum async_flags async_flags)
2956 {
2957         struct client_obd *cli = &exp->exp_obd->u.cli;
2958         int rc = 0;
2959         ENTRY;
2960
2961         if (oap->oap_magic != OAP_MAGIC)
2962                 RETURN(-EINVAL);
2963
2964         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2965                 RETURN(-EIO);
2966
2967         if (!cfs_list_empty(&oap->oap_pending_item) ||
2968             !cfs_list_empty(&oap->oap_urgent_item) ||
2969             !cfs_list_empty(&oap->oap_rpc_item))
2970                 RETURN(-EBUSY);
2971
2972         /* check if the file's owner/group is over quota */
2973         if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
2974                 struct cl_object *obj;
2975                 struct cl_attr    attr; /* XXX put attr into thread info */
2976                 unsigned int qid[MAXQUOTAS];
2977
2978                 obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
2979
2980                 cl_object_attr_lock(obj);
2981                 rc = cl_object_attr_get(env, obj, &attr);
2982                 cl_object_attr_unlock(obj);
2983
2984                 qid[USRQUOTA] = attr.cat_uid;
2985                 qid[GRPQUOTA] = attr.cat_gid;
2986                 if (rc == 0 &&
2987                     osc_quota_chkdq(cli, qid) == NO_QUOTA)
2988                         rc = -EDQUOT;
2989                 if (rc)
2990                         RETURN(rc);
2991         }
2992
2993         if (loi == NULL)
2994                 loi = lsm->lsm_oinfo[0];
2995
2996         client_obd_list_lock(&cli->cl_loi_list_lock);
2997
2998         LASSERT(off + count <= CFS_PAGE_SIZE);
2999         oap->oap_cmd = cmd;
3000         oap->oap_page_off = off;
3001         oap->oap_count = count;
3002         oap->oap_brw_flags = brw_flags;
3003         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
3004         if (cfs_memory_pressure_get())
3005                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
3006         cfs_spin_lock(&oap->oap_lock);
3007         oap->oap_async_flags = async_flags;
3008         cfs_spin_unlock(&oap->oap_lock);
3009
3010         if (cmd & OBD_BRW_WRITE) {
3011                 rc = osc_enter_cache(env, cli, loi, oap);
3012                 if (rc) {
3013                         client_obd_list_unlock(&cli->cl_loi_list_lock);
3014                         RETURN(rc);
3015                 }
3016         }
3017
3018         LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
3019                   cmd);
3020
3021         osc_oap_to_pending(oap);
3022         loi_list_maint(cli, loi);
3023         if (!osc_max_rpc_in_flight(cli, loi) &&
3024             lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
3025                 LASSERT(cli->cl_writeback_work != NULL);
3026                 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
3027
3028                 CDEBUG(D_CACHE, "Queued writeback work for client obd %p/%d.\n",
3029                        cli, rc);
3030         }
3031         client_obd_list_unlock(&cli->cl_loi_list_lock);
3032
3033         RETURN(0);
3034 }
3035
3036 /* aka (~was & now & flag), but this is more clear :) */
3037 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
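     /* e.g. SETTING(ASYNC_READY, ASYNC_READY | ASYNC_URGENT, ASYNC_URGENT) == 1,
      * since ASYNC_URGENT was not set before but is being set now */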
3038
3039 int osc_set_async_flags_base(struct client_obd *cli,
3040                              struct lov_oinfo *loi, struct osc_async_page *oap,
3041                              obd_flag async_flags)
3042 {
3043         struct loi_oap_pages *lop;
3044         int flags = 0;
3045         ENTRY;
3046
3047         LASSERT(!cfs_list_empty(&oap->oap_pending_item));
3048
3049         if (oap->oap_cmd & OBD_BRW_WRITE) {
3050                 lop = &loi->loi_write_lop;
3051         } else {
3052                 lop = &loi->loi_read_lop;
3053         }
3054
3055         if ((oap->oap_async_flags & async_flags) == async_flags)
3056                 RETURN(0);
3057
3058         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
3059                 flags |= ASYNC_READY;
3060
3061         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
3062             cfs_list_empty(&oap->oap_rpc_item)) {
3063                 if (oap->oap_async_flags & ASYNC_HP)
3064                         cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
3065                 else
3066                         cfs_list_add_tail(&oap->oap_urgent_item,
3067                                           &lop->lop_urgent);
3068                 flags |= ASYNC_URGENT;
3069                 loi_list_maint(cli, loi);
3070         }
3071         cfs_spin_lock(&oap->oap_lock);
3072         oap->oap_async_flags |= flags;
3073         cfs_spin_unlock(&oap->oap_lock);
3074
3075         LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
3076                         oap->oap_async_flags);
3077         RETURN(0);
3078 }
3079
3080 int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
3081                             struct lov_oinfo *loi, struct osc_async_page *oap)
3082 {
3083         struct client_obd *cli = &exp->exp_obd->u.cli;
3084         struct loi_oap_pages *lop;
3085         int rc = 0;
3086         ENTRY;
3087
3088         if (oap->oap_magic != OAP_MAGIC)
3089                 RETURN(-EINVAL);
3090
3091         if (loi == NULL)
3092                 loi = lsm->lsm_oinfo[0];
3093
3094         if (oap->oap_cmd & OBD_BRW_WRITE) {
3095                 lop = &loi->loi_write_lop;
3096         } else {
3097                 lop = &loi->loi_read_lop;
3098         }
3099
3100         client_obd_list_lock(&cli->cl_loi_list_lock);
3101
3102         if (!cfs_list_empty(&oap->oap_rpc_item))
3103                 GOTO(out, rc = -EBUSY);
3104
3105         osc_exit_cache(cli, oap, 0);
3106         osc_wake_cache_waiters(cli);
3107
3108         if (!cfs_list_empty(&oap->oap_urgent_item)) {
3109                 cfs_list_del_init(&oap->oap_urgent_item);
3110                 cfs_spin_lock(&oap->oap_lock);
3111                 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
3112                 cfs_spin_unlock(&oap->oap_lock);
3113         }
3114         if (!cfs_list_empty(&oap->oap_pending_item)) {
3115                 cfs_list_del_init(&oap->oap_pending_item);
3116                 lop_update_pending(cli, lop, oap->oap_cmd, -1);
3117         }
3118         loi_list_maint(cli, loi);
3119         LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
3120 out:
3121         client_obd_list_unlock(&cli->cl_loi_list_lock);
3122         RETURN(rc);
3123 }
3124
3125 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
3126                                         struct ldlm_enqueue_info *einfo)
3127 {
3128         void *data = einfo->ei_cbdata;
3129         int set = 0;
3130
3131         LASSERT(lock != NULL);
3132         LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
3133         LASSERT(lock->l_resource->lr_type == einfo->ei_type);
3134         LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
3135         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
3136
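             /* osc_ast_guard serializes access to l_ast_data, so concurrent
              * set/check attempts on the same lock see a consistent value */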
3137         lock_res_and_lock(lock);
3138         cfs_spin_lock(&osc_ast_guard);
3139
3140         if (lock->l_ast_data == NULL)
3141                 lock->l_ast_data = data;
3142         if (lock->l_ast_data == data)
3143                 set = 1;
3144
3145         cfs_spin_unlock(&osc_ast_guard);
3146         unlock_res_and_lock(lock);
3147
3148         return set;
3149 }
3150
3151 static int osc_set_data_with_check(struct lustre_handle *lockh,
3152                                    struct ldlm_enqueue_info *einfo)
3153 {
3154         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3155         int set = 0;
3156
3157         if (lock != NULL) {
3158                 set = osc_set_lock_data_with_check(lock, einfo);
3159                 LDLM_LOCK_PUT(lock);
3160         } else
3161                 CERROR("lockh %p, data %p - client evicted?\n",
3162                        lockh, einfo->ei_cbdata);
3163         return set;
3164 }
3165
3166 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3167                              ldlm_iterator_t replace, void *data)
3168 {
3169         struct ldlm_res_id res_id;
3170         struct obd_device *obd = class_exp2obd(exp);
3171
3172         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3173         ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3174         return 0;
3175 }
3176
3177 /* find any ldlm lock of the inode in osc
3178  * return 0    if no lock is found
3179  *        1    if a lock is found
3180  *      < 0    on error */
3181 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3182                            ldlm_iterator_t replace, void *data)
3183 {
3184         struct ldlm_res_id res_id;
3185         struct obd_device *obd = class_exp2obd(exp);
3186         int rc = 0;
3187
3188         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3189         rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3190         if (rc == LDLM_ITER_STOP)
3191                 return(1);
3192         if (rc == LDLM_ITER_CONTINUE)
3193                 return(0);
3194         return(rc);
3195 }
3196
3197 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
3198                             obd_enqueue_update_f upcall, void *cookie,
3199                             int *flags, int agl, int rc)
3200 {
3201         int intent = *flags & LDLM_FL_HAS_INTENT;
3202         ENTRY;
3203
3204         if (intent) {
3205                 /* The request was created before ldlm_cli_enqueue call. */
3206                 if (rc == ELDLM_LOCK_ABORTED) {
3207                         struct ldlm_reply *rep;
3208                         rep = req_capsule_server_get(&req->rq_pill,
3209                                                      &RMF_DLM_REP);
3210
3211                         LASSERT(rep != NULL);
3212                         if (rep->lock_policy_res1)
3213                                 rc = rep->lock_policy_res1;
3214                 }
3215         }
3216
3217         if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
3218             (rc == 0)) {
3219                 *flags |= LDLM_FL_LVB_READY;
3220                 CDEBUG(D_INODE, "got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3221                        lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
3222         }
3223
3224         /* Call the update callback. */
3225         rc = (*upcall)(cookie, rc);
3226         RETURN(rc);
3227 }
3228
3229 static int osc_enqueue_interpret(const struct lu_env *env,
3230                                  struct ptlrpc_request *req,
3231                                  struct osc_enqueue_args *aa, int rc)
3232 {
3233         struct ldlm_lock *lock;
3234         struct lustre_handle handle;
3235         __u32 mode;
3236         struct ost_lvb *lvb;
3237         __u32 lvb_len;
3238         int *flags = aa->oa_flags;
3239
3240         /* Make a local copy of a lock handle and a mode, because aa->oa_*
3241          * might be freed anytime after lock upcall has been called. */
3242         lustre_handle_copy(&handle, aa->oa_lockh);
3243         mode = aa->oa_ei->ei_mode;
3244
3245         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3246          * be valid. */
3247         lock = ldlm_handle2lock(&handle);
3248
3249         /* Take an additional reference so that a blocking AST that
3250          * ldlm_cli_enqueue_fini() might post for a failed lock is
3251          * guaranteed to arrive after an upcall has been executed by
3252          * osc_enqueue_fini(). */
3253         ldlm_lock_addref(&handle, mode);
3254
3255         /* Let the CP AST grant the lock first. */
3256         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
3257
3258         if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
3259                 lvb = NULL;
3260                 lvb_len = 0;
3261         } else {
3262                 lvb = aa->oa_lvb;
3263                 lvb_len = sizeof(*aa->oa_lvb);
3264         }
3265
3266         /* Complete the procedure of obtaining the lock. */
3267         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3268                                    mode, flags, lvb, lvb_len, &handle, rc);
3269         /* Complete osc stuff. */
3270         rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
3271                               flags, aa->oa_agl, rc);
3272
3273         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
3274
3275         /* Release the lock for async request. */
3276         if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
3277                 /*
3278                  * Releases a reference taken by ldlm_cli_enqueue(), if it is
3279                  * not already released by
3280                  * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
3281                  */
3282                 ldlm_lock_decref(&handle, mode);
3283
3284         LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3285                  aa->oa_lockh, req, aa);
3286         ldlm_lock_decref(&handle, mode);
3287         LDLM_LOCK_PUT(lock);
3288         return rc;
3289 }
3290
3291 void osc_update_enqueue(struct lustre_handle *lov_lockhp,
3292                         struct lov_oinfo *loi, int flags,
3293                         struct ost_lvb *lvb, __u32 mode, int rc)
3294 {
3295         struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
3296
3297         if (rc == ELDLM_OK) {
3298                 __u64 tmp;
3299
3300                 LASSERT(lock != NULL);
3301                 loi->loi_lvb = *lvb;
3302                 tmp = loi->loi_lvb.lvb_size;
3303                 /* Extend KMS up to the end of this lock and no further.
3304                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
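                     /* e.g. a lock on [0, 4095] allows a KMS of up to 4096 */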
3305                 if (tmp > lock->l_policy_data.l_extent.end)
3306                         tmp = lock->l_policy_data.l_extent.end + 1;
3307                 if (tmp >= loi->loi_kms) {
3308                         LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
3309                                    ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
3310                         loi_kms_set(loi, tmp);
3311                 } else {
3312                         LDLM_DEBUG(lock, "lock acquired, setting rss="
3313                                    LPU64"; leaving kms="LPU64", end="LPU64,
3314                                    loi->loi_lvb.lvb_size, loi->loi_kms,
3315                                    lock->l_policy_data.l_extent.end);
3316                 }
3317                 ldlm_lock_allow_match(lock);
3318         } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
3319                 LASSERT(lock != NULL);
3320                 loi->loi_lvb = *lvb;
3321                 ldlm_lock_allow_match(lock);
3322                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
3323                        " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
3324                 rc = ELDLM_OK;
3325         }
3326
3327         if (lock != NULL) {
3328                 if (rc != ELDLM_OK)
3329                         ldlm_lock_fail_match(lock);
3330
3331                 LDLM_LOCK_PUT(lock);
3332         }
3333 }
3334 EXPORT_SYMBOL(osc_update_enqueue);
3335
3336 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
3337
3338 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
3339  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
3340  * other synchronous requests; however, holding some locks while trying to
3341  * obtain others may take a considerable amount of time in the case of OST
3342  * failure, and when a client does not release locks that other sync requests
3343  * are waiting on, the client is excluded from the cluster -- such scenarios
3344  * make life difficult, so release locks just after they are obtained. */
3345 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3346                      int *flags, ldlm_policy_data_t *policy,
3347                      struct ost_lvb *lvb, int kms_valid,
3348                      obd_enqueue_update_f upcall, void *cookie,
3349                      struct ldlm_enqueue_info *einfo,
3350                      struct lustre_handle *lockh,
3351                      struct ptlrpc_request_set *rqset, int async, int agl)
3352 {
3353         struct obd_device *obd = exp->exp_obd;
3354         struct ptlrpc_request *req = NULL;
3355         int intent = *flags & LDLM_FL_HAS_INTENT;
3356         int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
3357         ldlm_mode_t mode;
3358         int rc;
3359         ENTRY;
3360
3361         /* Filesystem lock extents are extended to page boundaries so that
3362          * dealing with the page cache is a little smoother.  */
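             /* e.g. with CFS_PAGE_SIZE = 4096, an extent of [5000, 6000] is
              * widened to [4096, 8191] by the two statements below */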
3363         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3364         policy->l_extent.end |= ~CFS_PAGE_MASK;
3365
3366         /*
3367          * kms is not valid when either the object is completely fresh (so
3368          * that no locks are cached) or the object was evicted. In the
3369          * latter case a cached lock cannot be used, because it would prime
3370          * the inode state with a potentially stale LVB.
3371          */
3372         if (!kms_valid)
3373                 goto no_match;
3374
3375         /* Next, search for already existing extent locks that will cover us */
3376         /* If we're trying to read, we also search for an existing PW lock.  The
3377          * VFS and page cache already protect us locally, so lots of readers/
3378          * writers can share a single PW lock.
3379          *
3380          * There are problems with conversion deadlocks, so instead of
3381          * converting a read lock to a write lock, we'll just enqueue a new
3382          * one.
3383          *
3384          * At some point we should cancel the read lock instead of making them
3385          * send us a blocking callback, but there are problems with canceling
3386          * locks out from other users right now, too. */
3387         mode = einfo->ei_mode;
3388         if (einfo->ei_mode == LCK_PR)
3389                 mode |= LCK_PW;
3390         mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
3391                                einfo->ei_type, policy, mode, lockh, 0);
3392         if (mode) {
3393                 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
3394
3395                 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
3396                         /* For AGL, if the enqueue RPC was sent but the lock
3397                          * was not granted, then skip processing this stripe.
3398                          * Return -ECANCELED to tell the caller. */
3399                         ldlm_lock_decref(lockh, mode);
3400                         LDLM_LOCK_PUT(matched);
3401                         RETURN(-ECANCELED);
3402                 } else if (osc_set_lock_data_with_check(matched, einfo)) {
3403                         *flags |= LDLM_FL_LVB_READY;
3404                         /* addref the lock only for non-async requests, when
3405                          * a PW lock is matched although we asked for PR. */
3406                         if (!rqset && einfo->ei_mode != mode)
3407                                 ldlm_lock_addref(lockh, LCK_PR);
3408                         if (intent) {
3409                                 /* I would like to be able to ASSERT here that
3410                                  * rss <= kms, but I can't, for reasons which
3411                                  * are explained in lov_enqueue() */
3412                         }
3413
3414                         /* We already have a lock, and it's referenced */
3415                         (*upcall)(cookie, ELDLM_OK);
3416
3417                         if (einfo->ei_mode != mode)
3418                                 ldlm_lock_decref(lockh, LCK_PW);
3419                         else if (rqset)
3420                                 /* For async requests, decref the lock. */
3421                                 ldlm_lock_decref(lockh, einfo->ei_mode);
3422                         LDLM_LOCK_PUT(matched);
3423                         RETURN(ELDLM_OK);
3424                 } else {
3425                         ldlm_lock_decref(lockh, mode);
3426                         LDLM_LOCK_PUT(matched);
3427                 }
3428         }
3429
3430  no_match:
3431         if (intent) {
3432                 CFS_LIST_HEAD(cancels);
3433                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3434                                            &RQF_LDLM_ENQUEUE_LVB);
3435                 if (req == NULL)
3436                         RETURN(-ENOMEM);
3437
3438                 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3439                 if (rc) {
3440                         ptlrpc_request_free(req);
3441                         RETURN(rc);
3442                 }
3443
3444                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3445                                      sizeof *lvb);
3446                 ptlrpc_request_set_replen(req);
3447         }
3448
3449         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3450         *flags &= ~LDLM_FL_BLOCK_GRANTED;
3451
3452         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3453                               sizeof(*lvb), lockh, async);
3454         if (rqset) {
3455                 if (!rc) {
3456                         struct osc_enqueue_args *aa;
3457                         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3458                         aa = ptlrpc_req_async_args(req);
3459                         aa->oa_ei = einfo;
3460                         aa->oa_exp = exp;
3461                         aa->oa_flags  = flags;
3462                         aa->oa_upcall = upcall;
3463                         aa->oa_cookie = cookie;
3464                         aa->oa_lvb    = lvb;
3465                         aa->oa_lockh  = lockh;
3466                         aa->oa_agl    = !!agl;
3467
3468                         req->rq_interpret_reply =
3469                                 (ptlrpc_interpterer_t)osc_enqueue_interpret;
3470                         if (rqset == PTLRPCD_SET)
3471                                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3472                         else
3473                                 ptlrpc_set_add_req(rqset, req);
3474                 } else if (intent) {
3475                         ptlrpc_req_finished(req);
3476                 }
3477                 RETURN(rc);
3478         }
3479
3480         rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
3481         if (intent)
3482                 ptlrpc_req_finished(req);
3483
3484         RETURN(rc);
3485 }
3486
3487 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3488                        struct ldlm_enqueue_info *einfo,
3489                        struct ptlrpc_request_set *rqset)
3490 {
3491         struct ldlm_res_id res_id;
3492         int rc;
3493         ENTRY;
3494
3495         osc_build_res_name(oinfo->oi_md->lsm_object_id,
3496                            oinfo->oi_md->lsm_object_seq, &res_id);
3497
3498         rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
3499                               &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3500                               oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
3501                               oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
3502                               rqset, rqset != NULL, 0);
3503         RETURN(rc);
3504 }
3505
3506 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3507                    __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3508                    int *flags, void *data, struct lustre_handle *lockh,
3509                    int unref)
3510 {
3511         struct obd_device *obd = exp->exp_obd;
3512         int lflags = *flags;
3513         ldlm_mode_t rc;
3514         ENTRY;
3515
3516         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3517                 RETURN(-EIO);
3518
3519         /* Filesystem lock extents are extended to page boundaries so that
3520          * dealing with the page cache is a little smoother */
3521         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3522         policy->l_extent.end |= ~CFS_PAGE_MASK;
3523
3524         /* Next, search for already existing extent locks that will cover us */
3525         /* If we're trying to read, we also search for an existing PW lock.  The
3526          * VFS and page cache already protect us locally, so lots of readers/
3527          * writers can share a single PW lock. */
3528         rc = mode;
3529         if (mode == LCK_PR)
3530                 rc |= LCK_PW;
3531         rc = ldlm_lock_match(obd->obd_namespace, lflags,
3532                              res_id, type, policy, rc, lockh, unref);
3533         if (rc) {
3534                 if (data != NULL) {
3535                         if (!osc_set_data_with_check(lockh, data)) {
3536                                 if (!(lflags & LDLM_FL_TEST_LOCK))
3537                                         ldlm_lock_decref(lockh, rc);
3538                                 RETURN(0);
3539                         }
3540                 }
3541                 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3542                         ldlm_lock_addref(lockh, LCK_PR);
3543                         ldlm_lock_decref(lockh, LCK_PW);
3544                 }
3545                 RETURN(rc);
3546         }
3547         RETURN(rc);
3548 }
3549
3550 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
3551 {
3552         ENTRY;
3553
3554         if (unlikely(mode == LCK_GROUP))
3555                 ldlm_lock_decref_and_cancel(lockh, mode);
3556         else
3557                 ldlm_lock_decref(lockh, mode);
3558
3559         RETURN(0);
3560 }
3561
3562 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3563                       __u32 mode, struct lustre_handle *lockh)
3564 {
3565         ENTRY;
3566         RETURN(osc_cancel_base(lockh, mode));
3567 }
3568
3569 static int osc_cancel_unused(struct obd_export *exp,
3570                              struct lov_stripe_md *lsm,
3571                              ldlm_cancel_flags_t flags,
3572                              void *opaque)
3573 {
3574         struct obd_device *obd = class_exp2obd(exp);
3575         struct ldlm_res_id res_id, *resp = NULL;
3576
3577         if (lsm != NULL) {
3578                 resp = osc_build_res_name(lsm->lsm_object_id,
3579                                           lsm->lsm_object_seq, &res_id);
3580         }
3581
3582         return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3583 }
3584
3585 static int osc_statfs_interpret(const struct lu_env *env,
3586                                 struct ptlrpc_request *req,
3587                                 struct osc_async_args *aa, int rc)
3588 {
3589         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
3590         struct obd_statfs *msfs;
3591         __u64 used;
3592         ENTRY;
3593
3594         if (rc == -EBADR)
3595                 /* The request has in fact never been sent
3596                  * due to issues at a higher level (LOV).
3597                  * Exit immediately since the caller is
3598                  * aware of the problem and takes care
3599                  * of the cleanup */
3600                 RETURN(rc);
3601
3602         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3603             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3604                 GOTO(out, rc = 0);
3605
3606         if (rc != 0)
3607                 GOTO(out, rc);
3608
3609         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3610         if (msfs == NULL) {
3611                 GOTO(out, rc = -EPROTO);
3612         }
3613
3614         /* Reinitialize the RDONLY and DEGRADED flags at the client
3615          * on each statfs, so they don't stay set permanently. */
3616         cfs_spin_lock(&cli->cl_oscc.oscc_lock);
3617
3618         if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
3619                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
3620         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
3621                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
3622
3623         if (unlikely(msfs->os_state & OS_STATE_READONLY))
3624                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
3625         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
3626                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
3627
3628         /* Add a bit of hysteresis so this flag isn't continually flapping,
3629          * and ensure that new files don't get extremely fragmented due to
3630          * only a small amount of available space in the filesystem.
3631          * We want to set the NOSPC flag when there is less than ~0.1% free
3632          * and clear it when there is at least ~0.2% free space, so:
3633          *                   avail < ~0.1% max          max = avail + used
3634          *            1025 * avail < avail + used       used = blocks - free
3635          *            1024 * avail < used
3636          *            1024 * avail < blocks - free
3637          *                   avail < ((blocks - free) >> 10)
3638          *
3639          * On a very large disk, say 16 TB, 0.1% will be 16 GB. We don't
3640          * want to lose that amount of space, so in those cases we report
3641          * no space left if there is less than 1 GB left.                */
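             /* For example, if blocks - bfree = 2^30 blocks are in use, the
              * threshold below is 2^30 >> 10 = 2^20 blocks (~0.1%), subject
              * to the 1 << 30 cap applied by min_t() */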
3642         used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
3643         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
3644                      ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
3645                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
3646         else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3647                           (msfs->os_ffree > 64) &&
3648                           (msfs->os_bavail > (used << 1)))) {
3649                 cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
3650                                              OSCC_FLAG_NOSPC_BLK);
3651         }
3652
3653         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3654                      (msfs->os_bavail < used)))
3655                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
3656
3657         cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
3658
3659         *aa->aa_oi->oi_osfs = *msfs;
3660 out:
3661         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3662         RETURN(rc);
3663 }
3664
3665 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3666                             __u64 max_age, struct ptlrpc_request_set *rqset)
3667 {
3668         struct ptlrpc_request *req;
3669         struct osc_async_args *aa;
3670         int                    rc;
3671         ENTRY;
3672
3673         /* We could possibly pass max_age in the request (as an absolute
3674          * timestamp or a "seconds.usec ago") so the target can avoid doing
3675          * extra calls into the filesystem if that isn't necessary (e.g.
3676          * during mount that would help a bit).  Having relative timestamps
3677          * is not so great if request processing is slow, while absolute
3678          * timestamps are not ideal because they need time synchronization. */
3679         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3680         if (req == NULL)
3681                 RETURN(-ENOMEM);
3682
3683         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3684         if (rc) {
3685                 ptlrpc_request_free(req);
3686                 RETURN(rc);
3687         }
3688         ptlrpc_request_set_replen(req);
3689         req->rq_request_portal = OST_CREATE_PORTAL;
3690         ptlrpc_at_set_req_timeout(req);
3691
3692         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3693                 /* procfs requests should not wait on statfs, to avoid deadlock */
3694                 req->rq_no_resend = 1;
3695                 req->rq_no_delay = 1;
3696         }
3697
3698         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
3699         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3700         aa = ptlrpc_req_async_args(req);
3701         aa->aa_oi = oinfo;
3702
3703         ptlrpc_set_add_req(rqset, req);
3704         RETURN(0);
3705 }
3706
3707 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3708                       __u64 max_age, __u32 flags)
3709 {
3710         struct obd_statfs     *msfs;
3711         struct ptlrpc_request *req;
3712         struct obd_import     *imp = NULL;
3713         int rc;
3714         ENTRY;
3715
3716         /* Since the request might also come from lprocfs, we need to
3717          * sync this with client_disconnect_export (bug 15684). */
3718         cfs_down_read(&obd->u.cli.cl_sem);
3719         if (obd->u.cli.cl_import)
3720                 imp = class_import_get(obd->u.cli.cl_import);
3721         cfs_up_read(&obd->u.cli.cl_sem);
3722         if (!imp)
3723                 RETURN(-ENODEV);
3724
3725         /* We could possibly pass max_age in the request (as an absolute
3726          * timestamp or a "seconds.usec ago") so the target can avoid doing
3727          * extra calls into the filesystem if that isn't necessary (e.g.
3728          * during mount that would help a bit).  Having relative timestamps
3729          * is not so great if request processing is slow, while absolute
3730          * timestamps are not ideal because they need time synchronization. */
3731         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3732
3733         class_import_put(imp);
3734
3735         if (req == NULL)
3736                 RETURN(-ENOMEM);
3737
3738         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3739         if (rc) {
3740                 ptlrpc_request_free(req);
3741                 RETURN(rc);
3742         }
3743         ptlrpc_request_set_replen(req);
3744         req->rq_request_portal = OST_CREATE_PORTAL;
3745         ptlrpc_at_set_req_timeout(req);
3746
3747         if (flags & OBD_STATFS_NODELAY) {
3748                 /* procfs requests should not wait on statfs, to avoid deadlock */
3749                 req->rq_no_resend = 1;
3750                 req->rq_no_delay = 1;
3751         }
3752
3753         rc = ptlrpc_queue_wait(req);
3754         if (rc)
3755                 GOTO(out, rc);
3756
3757         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3758         if (msfs == NULL) {
3759                 GOTO(out, rc = -EPROTO);
3760         }
3761
3762         *osfs = *msfs;
3763
3764         EXIT;
3765  out:
3766         ptlrpc_req_finished(req);
3767         return rc;
3768 }
3769
3770 /* Retrieve object striping information.
3771  *
3772  * @lump is a pointer to an in-core struct with lmm_ost_count indicating
3773  * the maximum number of OST indices which will fit in the user buffer.
3774  * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3775  */
3776 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3777 {
3778         /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
3779         struct lov_user_md_v3 lum, *lumk;
3780         struct lov_user_ost_data_v1 *lmm_objects;
3781         int rc = 0, lum_size;
3782         ENTRY;
3783
3784         if (!lsm)
3785                 RETURN(-ENODATA);
3786
3787         /* we only need the header part from user space to get lmm_magic and
3788          * lmm_stripe_count (the header part is common to v1 and v3) */
3789         lum_size = sizeof(struct lov_user_md_v1);
3790         if (cfs_copy_from_user(&lum, lump, lum_size))
3791                 RETURN(-EFAULT);
3792
3793         if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
3794             (lum.lmm_magic != LOV_USER_MAGIC_V3))
3795                 RETURN(-EINVAL);
3796
3797         /* lov_user_md_vX and lov_mds_md_vX must have the same size */
3798         LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
3799         LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
3800         LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
3801
3802         /* we can use lov_mds_md_size() to compute lum_size
3803          * because lov_user_md_vX and lov_mds_md_vX have the same size */
3804         if (lum.lmm_stripe_count > 0) {
3805                 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
3806                 OBD_ALLOC(lumk, lum_size);
3807                 if (!lumk)
3808                         RETURN(-ENOMEM);
3809
3810                 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
3811                         lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
3812                 else
3813                         lmm_objects = &(lumk->lmm_objects[0]);
3814                 lmm_objects->l_object_id = lsm->lsm_object_id;
3815         } else {
3816                 lum_size = lov_mds_md_size(0, lum.lmm_magic);
3817                 lumk = &lum;
3818         }
3819
3820         lumk->lmm_object_id = lsm->lsm_object_id;
3821         lumk->lmm_object_seq = lsm->lsm_object_seq;
3822         lumk->lmm_stripe_count = 1;
3823
3824         if (cfs_copy_to_user(lump, lumk, lum_size))
3825                 rc = -EFAULT;
3826
3827         if (lumk != &lum)
3828                 OBD_FREE(lumk, lum_size);
3829
3830         RETURN(rc);
3831 }
3832
3833
3834 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3835                          void *karg, void *uarg)
3836 {
3837         struct obd_device *obd = exp->exp_obd;
3838         struct obd_ioctl_data *data = karg;
3839         int err = 0;
3840         ENTRY;
3841
3842         if (!cfs_try_module_get(THIS_MODULE)) {
3843                 CERROR("Can't get module. Is it alive?\n");
3844                 return -EINVAL;
3845         }
3846         switch (cmd) {
3847         case OBD_IOC_LOV_GET_CONFIG: {
3848                 char *buf;
3849                 struct lov_desc *desc;
3850                 struct obd_uuid uuid;
3851
3852                 buf = NULL;
3853                 len = 0;
3854                 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3855                         GOTO(out, err = -EINVAL);
3856
3857                 data = (struct obd_ioctl_data *)buf;
3858
3859                 if (sizeof(*desc) > data->ioc_inllen1) {
3860                         obd_ioctl_freedata(buf, len);
3861                         GOTO(out, err = -EINVAL);
3862                 }
3863
3864                 if (data->ioc_inllen2 < sizeof(uuid)) {
3865                         obd_ioctl_freedata(buf, len);
3866                         GOTO(out, err = -EINVAL);
3867                 }
3868
3869                 desc = (struct lov_desc *)data->ioc_inlbuf1;
3870                 desc->ld_tgt_count = 1;
3871                 desc->ld_active_tgt_count = 1;
3872                 desc->ld_default_stripe_count = 1;
3873                 desc->ld_default_stripe_size = 0;
3874                 desc->ld_default_stripe_offset = 0;
3875                 desc->ld_pattern = 0;
3876                 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3877
3878                 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3879
3880                 err = cfs_copy_to_user((void *)uarg, buf, len);
3881                 if (err)
3882                         err = -EFAULT;
3883                 obd_ioctl_freedata(buf, len);
3884                 GOTO(out, err);
3885         }
3886         case LL_IOC_LOV_SETSTRIPE:
3887                 err = obd_alloc_memmd(exp, karg);
3888                 if (err > 0)
3889                         err = 0;
3890                 GOTO(out, err);
3891         case LL_IOC_LOV_GETSTRIPE:
3892                 err = osc_getstripe(karg, uarg);
3893                 GOTO(out, err);
3894         case OBD_IOC_CLIENT_RECOVER:
3895                 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3896                                             data->ioc_inlbuf1, 0);
3897                 if (err > 0)
3898                         err = 0;
3899                 GOTO(out, err);
3900         case IOC_OSC_SET_ACTIVE:
3901                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3902                                                data->ioc_offset);
3903                 GOTO(out, err);
3904         case OBD_IOC_POLL_QUOTACHECK:
3905                 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
3906                 GOTO(out, err);
3907         case OBD_IOC_PING_TARGET:
3908                 err = ptlrpc_obd_ping(obd);
3909                 GOTO(out, err);
3910         default:
3911                 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3912                        cmd, cfs_curproc_comm());
3913                 GOTO(out, err = -ENOTTY);
3914         }
3915 out:
3916         cfs_module_put(THIS_MODULE);
3917         return err;
3918 }
3919
3920 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3921                         void *key, __u32 *vallen, void *val,
3922                         struct lov_stripe_md *lsm)
3923 {
3924         ENTRY;
3925         if (!vallen || !val)
3926                 RETURN(-EFAULT);
3927
3928         if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3929                 __u32 *stripe = val;
3930                 *vallen = sizeof(*stripe);
3931                 *stripe = 0;
3932                 RETURN(0);
3933         } else if (KEY_IS(KEY_LAST_ID)) {
3934                 struct ptlrpc_request *req;
3935                 obd_id                *reply;
3936                 char                  *tmp;
3937                 int                    rc;
3938
3939                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3940                                            &RQF_OST_GET_INFO_LAST_ID);
3941                 if (req == NULL)
3942                         RETURN(-ENOMEM);
3943
3944                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3945                                      RCL_CLIENT, keylen);
3946                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3947                 if (rc) {
3948                         ptlrpc_request_free(req);
3949                         RETURN(rc);
3950                 }
3951
3952                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3953                 memcpy(tmp, key, keylen);
3954
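                     /* Fail right away instead of waiting for recovery, and
                      * never resend: the caller would rather see an error
                      * than block on an unreachable OST. */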
3955                 req->rq_no_delay = req->rq_no_resend = 1;
3956                 ptlrpc_request_set_replen(req);
3957                 rc = ptlrpc_queue_wait(req);
3958                 if (rc)
3959                         GOTO(out, rc);
3960
3961                 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3962                 if (reply == NULL)
3963                         GOTO(out, rc = -EPROTO);
3964
3965                 *((obd_id *)val) = *reply;
3966         out:
3967                 ptlrpc_req_finished(req);
3968                 RETURN(rc);
3969         } else if (KEY_IS(KEY_FIEMAP)) {
3970                 struct ptlrpc_request *req;
3971                 struct ll_user_fiemap *reply;
3972                 char *tmp;
3973                 int rc;
3974
3975                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3976                                            &RQF_OST_GET_INFO_FIEMAP);
3977                 if (req == NULL)
3978                         RETURN(-ENOMEM);
3979
3980                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
3981                                      RCL_CLIENT, keylen);
3982                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3983                                      RCL_CLIENT, *vallen);
3984                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3985                                      RCL_SERVER, *vallen);
3986
3987                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3988                 if (rc) {
3989                         ptlrpc_request_free(req);
3990                         RETURN(rc);
3991                 }
3992
3993                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
3994                 memcpy(tmp, key, keylen);
3995                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
3996                 memcpy(tmp, val, *vallen);
3997
3998                 ptlrpc_request_set_replen(req);
3999                 rc = ptlrpc_queue_wait(req);
4000                 if (rc)
4001                         GOTO(out1, rc);
4002
4003                 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4004                 if (reply == NULL)
4005                         GOTO(out1, rc = -EPROTO);
4006
4007                 memcpy(val, reply, *vallen);
4008         out1:
4009                 ptlrpc_req_finished(req);
4010
4011                 RETURN(rc);
4012         }
4013
4014         RETURN(-EINVAL);
4015 }
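
     /*
      * A minimal usage sketch (editorial addition, not in the original
      * source): a hypothetical caller fetches the last allocated object id
      * through the generic obd_get_info() entry point, which dispatches to
      * osc_get_info() via o_get_info:
      *
      *      obd_id last_id;
      *      __u32  vallen = sizeof(last_id);
      *      int    rc;
      *
      *      rc = obd_get_info(exp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
      *                        &vallen, &last_id, NULL);
      *      if (rc == 0)
      *              CDEBUG(D_INFO, "last object id: "LPU64"\n", last_id);
      */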
4016
4017 static int osc_setinfo_mds_connect_import(struct obd_import *imp)
4018 {
4019         struct llog_ctxt *ctxt;
4020         int rc = 0;
4021         ENTRY;
4022
4023         ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
4024         if (ctxt) {
4025                 rc = llog_initiator_connect(ctxt);
4026                 llog_ctxt_put(ctxt);
4027         } else {
4028                 /* XXX Should we return an error, or skip setting the flags below? */
4029         }
4030
4031         cfs_spin_lock(&imp->imp_lock);
4032         imp->imp_server_timeout = 1;
4033         imp->imp_pingable = 1;
4034         cfs_spin_unlock(&imp->imp_lock);
4035         CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
4036
4037         RETURN(rc);
4038 }
4039
4040 static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
4041                                           struct ptlrpc_request *req,
4042                                           void *aa, int rc)
4043 {
4044         ENTRY;
4045         if (rc != 0)
4046                 RETURN(rc);
4047
4048         RETURN(osc_setinfo_mds_connect_import(req->rq_import));
4049 }
4050
4051 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
4052                               void *key, obd_count vallen, void *val,
4053                               struct ptlrpc_request_set *set)
4054 {
4055         struct ptlrpc_request *req;
4056         struct obd_device     *obd = exp->exp_obd;
4057         struct obd_import     *imp = class_exp2cliimp(exp);
4058         char                  *tmp;
4059         int                    rc;
4060         ENTRY;
4061
4062         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
4063
4064         if (KEY_IS(KEY_NEXT_ID)) {
4065                 obd_id new_val;
4066                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4067
4068                 if (vallen != sizeof(obd_id))
4069                         RETURN(-ERANGE);
4070                 if (val == NULL)
4071                         RETURN(-EINVAL);
4072
4076                 /* Avoid a race between allocating a new object and setting
4077                  * the next id from the ll_sync thread. */
4078                 cfs_spin_lock(&oscc->oscc_lock);
4079                 new_val = *((obd_id*)val) + 1;
4080                 if (new_val > oscc->oscc_next_id)
4081                         oscc->oscc_next_id = new_val;
4082                 cfs_spin_unlock(&oscc->oscc_lock);
4083                 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
4084                        exp->exp_obd->obd_name,
4085                        obd->u.cli.cl_oscc.oscc_next_id);
4086
4087                 RETURN(0);
4088         }
4089
4090         if (KEY_IS(KEY_CHECKSUM)) {
4091                 if (vallen != sizeof(int))
4092                         RETURN(-EINVAL);
4093                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
4094                 RETURN(0);
4095         }
4096
4097         if (KEY_IS(KEY_SPTLRPC_CONF)) {
4098                 sptlrpc_conf_client_adapt(obd);
4099                 RETURN(0);
4100         }
4101
4102         if (KEY_IS(KEY_FLUSH_CTX)) {
4103                 sptlrpc_import_flush_my_ctx(imp);
4104                 RETURN(0);
4105         }
4106
4107         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
4108                 RETURN(-EINVAL);
4109
4110         /* We pass all other commands directly to the OST. Since nobody calls
4111            osc methods directly and everybody is supposed to go through LOV,
4112            we assume LOV has already rejected invalid values for us.
4113            The only recognised keys so far are evict_by_nid and mds_conn.
4114            Even if something bad slips through, we would get -EINVAL from the
4115            OST anyway. */
4116
4117         if (KEY_IS(KEY_GRANT_SHRINK))
4118                 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
4119         else
4120                 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
4121
4122         if (req == NULL)
4123                 RETURN(-ENOMEM);
4124
4125         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
4126                              RCL_CLIENT, keylen);
4127         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
4128                              RCL_CLIENT, vallen);
4129         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
4130         if (rc) {
4131                 ptlrpc_request_free(req);
4132                 RETURN(rc);
4133         }
4134
4135         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
4136         memcpy(tmp, key, keylen);
4137         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
4138         memcpy(tmp, val, vallen);
4139
4140         if (KEY_IS(KEY_MDS_CONN)) {
4141                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4142
4143                 oscc->oscc_oa.o_seq = (*(__u32 *)val);
4144                 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
4145                 LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
4146                 req->rq_no_delay = req->rq_no_resend = 1;
4147                 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
4148         } else if (KEY_IS(KEY_GRANT_SHRINK)) {
4149                 struct osc_grant_args *aa;
4150                 struct obdo *oa;
4151
4152                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
4153                 aa = ptlrpc_req_async_args(req);
4154                 OBDO_ALLOC(oa);
4155                 if (!oa) {
4156                         ptlrpc_req_finished(req);
4157                         RETURN(-ENOMEM);
4158                 }
4159                 *oa = ((struct ost_body *)val)->oa;
4160                 aa->aa_oa = oa;
4161                 req->rq_interpret_reply = osc_shrink_grant_interpret;
4162         }
4163
4164         ptlrpc_request_set_replen(req);
4165         if (!KEY_IS(KEY_GRANT_SHRINK)) {
4166                 LASSERT(set != NULL);
4167                 ptlrpc_set_add_req(set, req);
4168                 ptlrpc_check_set(NULL, set);
4169         } else
4170                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
4171
4172         RETURN(0);
4173 }
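
     /*
      * A minimal usage sketch (editorial addition, not in the original
      * source): a hypothetical caller enables wire checksums through the
      * generic obd_set_info_async() entry point, which dispatches here via
      * o_set_info_async.  KEY_CHECKSUM is handled locally above, so no
      * request set is needed:
      *
      *      int on = 1;
      *
      *      rc = obd_set_info_async(exp, sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
      *                              sizeof(on), &on, NULL);
      */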
4174
4175
4176 static struct llog_operations osc_size_repl_logops = {
4177         .lop_cancel = llog_obd_repl_cancel
4178 };
4179
4180 static struct llog_operations osc_mds_ost_orig_logops;
4181
4182 static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4183                            struct obd_device *tgt, struct llog_catid *catid)
4184 {
4185         int rc;
4186         ENTRY;
4187
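             /* Two llog contexts are set up: the MDS->OST originator context
              * and the size-replication context.  If the second setup fails,
              * the first one is cleaned up again below. */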
4188         rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
4189                         &catid->lci_logid, &osc_mds_ost_orig_logops);
4190         if (rc) {
4191                 CERROR("failed to setup LLOG_MDS_OST_ORIG_CTXT\n");
4192                 GOTO(out, rc);
4193         }
4194
4195         rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
4196                         NULL, &osc_size_repl_logops);
4197         if (rc) {
4198                 struct llog_ctxt *ctxt =
4199                         llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4200                 if (ctxt)
4201                         llog_cleanup(ctxt);
4202                 CERROR("failed to setup LLOG_SIZE_REPL_CTXT\n");
4203         }
4204         GOTO(out, rc);
4205 out:
4206         if (rc) {
4207                 CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
4208                        obd->obd_name, tgt->obd_name, catid, rc);
4209                 CERROR("logid "LPX64":0x%x\n",
4210                        catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
4211         }
4212         return rc;
4213 }
4214
4215 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4216                          struct obd_device *disk_obd, int *index)
4217 {
4218         struct llog_catid catid;
4219         static char name[32] = CATLIST;
4220         int rc;
4221         ENTRY;
4222
4223         LASSERT(olg == &obd->obd_olg);
4224
4225         cfs_mutex_lock(&olg->olg_cat_processing);
4226         rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
4227         if (rc) {
4228                 CERROR("llog_get_cat_list failed: rc = %d\n", rc);
4229                 GOTO(out, rc);
4230         }
4231
4232         CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
4233                obd->obd_name, *index, catid.lci_logid.lgl_oid,
4234                catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
4235
4236         rc = __osc_llog_init(obd, olg, disk_obd, &catid);
4237         if (rc) {
4238                 CERROR("__osc_llog_init failed: rc = %d\n", rc);
4239                 GOTO(out, rc);
4240         }
4241
4242         rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
4243         if (rc) {
4244                 CERROR("llog_put_cat_list failed: rc = %d\n", rc);
4245                 GOTO(out, rc);
4246         }
4247
4248  out:
4249         cfs_mutex_unlock(&olg->olg_cat_processing);
4250
4251         return rc;
4252 }
4253
4254 static int osc_llog_finish(struct obd_device *obd, int count)
4255 {
4256         struct llog_ctxt *ctxt;
4257         int rc = 0, rc2 = 0;
4258         ENTRY;
4259
4260         ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4261         if (ctxt)
4262                 rc = llog_cleanup(ctxt);
4263
4264         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4265         if (ctxt)
4266                 rc2 = llog_cleanup(ctxt);
4267         if (!rc)
4268                 rc = rc2;
4269
4270         RETURN(rc);
4271 }
4272
4273 static int osc_reconnect(const struct lu_env *env,
4274                          struct obd_export *exp, struct obd_device *obd,
4275                          struct obd_uuid *cluuid,
4276                          struct obd_connect_data *data,
4277                          void *localdata)
4278 {
4279         struct client_obd *cli = &obd->u.cli;
4280
4281         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
4282                 long lost_grant;
4283
4284                 client_obd_list_lock(&cli->cl_loi_list_lock);
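                     /* Ask the server for the grant we think we already hold
                      * (available + dirty); if that is zero, fall back to two
                      * full RPCs worth.  "a ?: b" is the GCC extension for
                      * a ? a : b. */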
4285                 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
4286                                 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
4287                 lost_grant = cli->cl_lost_grant;
4288                 cli->cl_lost_grant = 0;
4289                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4290
4291                 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
4292                        "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
4293                        cli->cl_avail_grant, cli->cl_dirty, lost_grant);
4294                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
4295                        " ocd_grant: %d\n", data->ocd_connect_flags,
4296                        data->ocd_version, data->ocd_grant);
4297         }
4298
4299         RETURN(0);
4300 }
4301
4302 static int osc_disconnect(struct obd_export *exp)
4303 {
4304         struct obd_device *obd = class_exp2obd(exp);
4305         struct llog_ctxt  *ctxt;
4306         int rc;
4307
4308         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4309         if (ctxt) {
4310                 if (obd->u.cli.cl_conn_count == 1) {
4311                         /* Flush any remaining cancel messages out to the
4312                          * target */
4313                         llog_sync(ctxt, exp);
4314                 }
4315                 llog_ctxt_put(ctxt);
4316         } else {
4317                 CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
4318                        obd);
4319         }
4320
4321         rc = client_disconnect_export(exp);
4322         /**
4323          * Initially we placed del_shrink_grant before disconnect_export, but
4324          * that causes the following race when setup (connect) and cleanup
4325          * (disconnect) are tangled together:
4326          *      connect p1                     disconnect p2
4327          *   ptlrpc_connect_import
4328          *     ...............               class_manual_cleanup
4329          *                                     osc_disconnect
4330          *                                     del_shrink_grant
4331          *   ptlrpc_connect_interpret
4332          *     init_grant_shrink
4333          *   add this client to shrink list
4334          *                                      cleanup_osc
4335          * Bang! The pinger triggers the shrink.
4336          * So the osc must be removed from the shrink list only after we are
4337          * sure the import has been destroyed (see BUG18662).
4338          */
4339         if (obd->u.cli.cl_import == NULL)
4340                 osc_del_shrink_grant(&obd->u.cli);
4341         return rc;
4342 }
4343
4344 static int osc_import_event(struct obd_device *obd,
4345                             struct obd_import *imp,
4346                             enum obd_import_event event)
4347 {
4348         struct client_obd *cli;
4349         int rc = 0;
4350
4351         ENTRY;
4352         LASSERT(imp->imp_obd == obd);
4353
4354         switch (event) {
4355         case IMP_EVENT_DISCON: {
4356                 /* Only do this on the MDS OSCs */
4357                 if (imp->imp_server_timeout) {
4358                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4359
4360                         cfs_spin_lock(&oscc->oscc_lock);
4361                         oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
4362                         cfs_spin_unlock(&oscc->oscc_lock);
4363                 }
4364                 cli = &obd->u.cli;
4365                 client_obd_list_lock(&cli->cl_loi_list_lock);
4366                 cli->cl_avail_grant = 0;
4367                 cli->cl_lost_grant = 0;
4368                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4369                 break;
4370         }
4371         case IMP_EVENT_INACTIVE: {
4372                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
4373                 break;
4374         }
4375         case IMP_EVENT_INVALIDATE: {
4376                 struct ldlm_namespace *ns = obd->obd_namespace;
4377                 struct lu_env         *env;
4378                 int                    refcheck;
4379
4380                 env = cl_env_get(&refcheck);
4381                 if (!IS_ERR(env)) {
4382                         /* Reset grants */
4383                         cli = &obd->u.cli;
4384                         client_obd_list_lock(&cli->cl_loi_list_lock);
4385                         /* all pages go to failing RPCs due to the invalid
4386                          * import */
4387                         osc_check_rpcs(env, cli);
4388                         client_obd_list_unlock(&cli->cl_loi_list_lock);
4389
4390                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
4391                         cl_env_put(env, &refcheck);
4392                 } else
4393                         rc = PTR_ERR(env);
4394                 break;
4395         }
4396         case IMP_EVENT_ACTIVE: {
4397                 /* Only do this on the MDS OSCs */
4398                 if (imp->imp_server_timeout) {
4399                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4400
4401                         cfs_spin_lock(&oscc->oscc_lock);
4402                         oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
4403                                               OSCC_FLAG_NOSPC_BLK);
4404                         cfs_spin_unlock(&oscc->oscc_lock);
4405                 }
4406                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
4407                 break;
4408         }
4409         case IMP_EVENT_OCD: {
4410                 struct obd_connect_data *ocd = &imp->imp_connect_data;
4411
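                     /* The connect handshake finished; re-derive grant
                      * accounting from the server-advertised connect data. */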
4412                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
4413                         osc_init_grant(&obd->u.cli, ocd);
4414
4415                 /* See bug 7198 */
4416                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
4417                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
4418
4419                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
4420                 break;
4421         }
4422         case IMP_EVENT_DEACTIVATE: {
4423                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
4424                 break;
4425         }
4426         case IMP_EVENT_ACTIVATE: {
4427                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
4428                 break;
4429         }
4430         default:
4431                 CERROR("Unknown import event %d\n", event);
4432                 LBUG();
4433         }
4434         RETURN(rc);
4435 }
4436
4437 /**
4438  * Determine whether the lock can be canceled before replaying the lock
4439  * during recovery, see bug16774 for detailed information.
4440  *
4441  * \retval zero the lock can't be canceled
4442  * \retval other ok to cancel
4443  */
4444 static int osc_cancel_for_recovery(struct ldlm_lock *lock)
4445 {
4446         check_res_locked(lock->l_resource);
4447
4448         /*
4449          * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
4450          *
4451          * XXX as a future improvement, we could also cancel an unused write
4452          * lock if it has no dirty data and no active mmaps.
4453          */
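             /* A zero page reference count means no cached page still points
              * at this lock, so cancelling it instead of replaying it cannot
              * lose any data. */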
4454         if (lock->l_resource->lr_type == LDLM_EXTENT &&
4455             (lock->l_granted_mode == LCK_PR ||
4456              lock->l_granted_mode == LCK_CR) &&
4457             (osc_dlm_lock_pageref(lock) == 0))
4458                 RETURN(1);
4459
4460         RETURN(0);
4461 }
4462
4463 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
4464 {
4465         struct client_obd *cli = &obd->u.cli;
4466         int rc;
4467         ENTRY;
4468
4470         rc = ptlrpcd_addref();
4471         if (rc)
4472                 RETURN(rc);
4473
4474         rc = client_obd_setup(obd, lcfg);
4475         if (rc == 0) {
4476                 void *handler;
4477                 handler = ptlrpcd_alloc_work(cli->cl_import,
4478                                              brw_queue_work, cli);
4479                 if (!IS_ERR(handler))
4480                         cli->cl_writeback_work = handler;
4481                 else
4482                         rc = PTR_ERR(handler);
4483         }
4484
4485         if (rc == 0) {
4486                 struct lprocfs_static_vars lvars = { 0 };
4487
4488                 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
4489                 lprocfs_osc_init_vars(&lvars);
4490                 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
4491                         lproc_osc_attach_seqstat(obd);
4492                         sptlrpc_lprocfs_cliobd_attach(obd);
4493                         ptlrpc_lprocfs_register_obd(obd);
4494                 }
4495
4496                 oscc_init(obd);
4497                 /* We need to allocate a few extra requests, because
4498                    brw_interpret tries to create new requests before freeing
4499                    previous ones. Ideally we would reserve 2x max_rpcs_in_flight,
4500                    but that might waste too much RAM in practice, so adding 2
4501                    is just a guess that should still work. */
4502                 cli->cl_import->imp_rq_pool =
4503                         ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
4504                                             OST_MAXREQSIZE,
4505                                             ptlrpc_add_rqs_to_pool);
4506
4507                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
4508
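                     /* Allow the LDLM layer to cancel unused read locks at
                      * replay time rather than replaying them (see
                      * osc_cancel_for_recovery() above). */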
4509                 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
4510         }
4511
4512         if (rc)
4513                 ptlrpcd_decref();
4514         RETURN(rc);
4515 }
4516
4517 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
4518 {
4519         int rc = 0;
4520         ENTRY;
4521
4522         switch (stage) {
4523         case OBD_CLEANUP_EARLY: {
4524                 struct obd_import *imp;
4525                 imp = obd->u.cli.cl_import;
4526                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
4527                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
4528                 ptlrpc_deactivate_import(imp);
4529                 cfs_spin_lock(&imp->imp_lock);
4530                 imp->imp_pingable = 0;
4531                 cfs_spin_unlock(&imp->imp_lock);
4532                 break;
4533         }
4534         case OBD_CLEANUP_EXPORTS: {
4535                 struct client_obd *cli = &obd->u.cli;
4536                 /* LU-464
4537                  * for echo client, export may be on zombie list, wait for
4538                  * zombie thread to cull it, because cli.cl_import will be
4539                  * cleared in client_disconnect_export():
4540                  *   class_export_destroy() -> obd_cleanup() ->
4541                  *   echo_device_free() -> echo_client_cleanup() ->
4542                  *   obd_disconnect() -> osc_disconnect() ->
4543                  *   client_disconnect_export()
4544                  */
4545                 obd_zombie_barrier();
4546                 if (cli->cl_writeback_work) {
4547                         ptlrpcd_destroy_work(cli->cl_writeback_work);
4548                         cli->cl_writeback_work = NULL;
4549                 }
4550                 obd_cleanup_client_import(obd);
4551                 ptlrpc_lprocfs_unregister_obd(obd);
4552                 lprocfs_obd_cleanup(obd);
4553                 rc = obd_llog_finish(obd, 0);
4554                 if (rc != 0)
4555                         CERROR("failed to cleanup llogging subsystems\n");
4556                 break;
4557         }
4558         }
4559         RETURN(rc);
4560 }
4561
4562 int osc_cleanup(struct obd_device *obd)
4563 {
4564         int rc;
4565
4566         ENTRY;
4567
4568         /* free memory of osc quota cache */
4569         osc_quota_cleanup(obd);
4570
4571         rc = client_obd_cleanup(obd);
4572
4573         ptlrpcd_decref();
4574         RETURN(rc);
4575 }
4576
4577 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
4578 {
4579         struct lprocfs_static_vars lvars = { 0 };
4580         int rc = 0;
4581
4582         lprocfs_osc_init_vars(&lvars);
4583
4584         switch (lcfg->lcfg_command) {
4585         default:
4586                 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4587                                               lcfg, obd);
4588                 if (rc > 0)
4589                         rc = 0;
4590                 break;
4591         }
4592
4593         return(rc);
4594 }
4595
4596 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4597 {
4598         return osc_process_config_base(obd, buf);
4599 }
4600
4601 struct obd_ops osc_obd_ops = {
4602         .o_owner                = THIS_MODULE,
4603         .o_setup                = osc_setup,
4604         .o_precleanup           = osc_precleanup,
4605         .o_cleanup              = osc_cleanup,
4606         .o_add_conn             = client_import_add_conn,
4607         .o_del_conn             = client_import_del_conn,
4608         .o_connect              = client_connect_import,
4609         .o_reconnect            = osc_reconnect,
4610         .o_disconnect           = osc_disconnect,
4611         .o_statfs               = osc_statfs,
4612         .o_statfs_async         = osc_statfs_async,
4613         .o_packmd               = osc_packmd,
4614         .o_unpackmd             = osc_unpackmd,
4615         .o_precreate            = osc_precreate,
4616         .o_create               = osc_create,
4617         .o_create_async         = osc_create_async,
4618         .o_destroy              = osc_destroy,
4619         .o_getattr              = osc_getattr,
4620         .o_getattr_async        = osc_getattr_async,
4621         .o_setattr              = osc_setattr,
4622         .o_setattr_async        = osc_setattr_async,
4623         .o_brw                  = osc_brw,
4624         .o_punch                = osc_punch,
4625         .o_sync                 = osc_sync,
4626         .o_enqueue              = osc_enqueue,
4627         .o_change_cbdata        = osc_change_cbdata,
4628         .o_find_cbdata          = osc_find_cbdata,
4629         .o_cancel               = osc_cancel,
4630         .o_cancel_unused        = osc_cancel_unused,
4631         .o_iocontrol            = osc_iocontrol,
4632         .o_get_info             = osc_get_info,
4633         .o_set_info_async       = osc_set_info_async,
4634         .o_import_event         = osc_import_event,
4635         .o_llog_init            = osc_llog_init,
4636         .o_llog_finish          = osc_llog_finish,
4637         .o_process_config       = osc_process_config,
4638         .o_quotactl             = osc_quotactl,
4639         .o_quotacheck           = osc_quotacheck,
4640         .o_quota_adjust_qunit   = osc_quota_adjust_qunit,
4641 };
4642
4643 extern struct lu_kmem_descr osc_caches[];
4644 extern cfs_spinlock_t       osc_ast_guard;
4645 extern cfs_lock_class_key_t osc_ast_guard_class;
4646
4647 int __init osc_init(void)
4648 {
4649         struct lprocfs_static_vars lvars = { 0 };
4650         int rc;
4651         ENTRY;
4652
4653         /* Print the address of _any_ initialized kernel symbol from this
4654          * module, to allow debugging with gdb versions that do not support
4655          * data symbols from modules. */
4656         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
4657
4658         rc = lu_kmem_init(osc_caches);
             if (rc)
                     RETURN(rc);
4659
4660         lprocfs_osc_init_vars(&lvars);
4661
4662         osc_quota_init();
4663         rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4664                                  LUSTRE_OSC_NAME, &osc_device_type);
4665         if (rc) {
4666                 lu_kmem_fini(osc_caches);
4667                 RETURN(rc);
4668         }
4669
4670         cfs_spin_lock_init(&osc_ast_guard);
4671         cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
4672
4673         osc_mds_ost_orig_logops = llog_lvfs_ops;
4674         osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
4675         osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
4676         osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
4677         osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
4678
4679         RETURN(rc);
4680 }
4681
4682 #ifdef __KERNEL__
4683 static void /*__exit*/ osc_exit(void)
4684 {
4685         lu_device_type_fini(&osc_device_type);
4686
4687         osc_quota_exit();
4688         class_unregister_type(LUSTRE_OSC_NAME);
4689         lu_kmem_fini(osc_caches);
4690 }
4691
4692 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4693 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4694 MODULE_LICENSE("GPL");
4695
4696 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
4697 #endif