lustre/osc/osc_request.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

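/*
 * Pack @oa into the OST body of @req, converting the local obdo to the
 * wire format negotiated in imp_connect_data.
 */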
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* Do the MDS-to-OST setattr asynchronously. */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
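
/*
 * Illustrative caller sketch (hypothetical names; real callers live in
 * higher layers). With a non-NULL request set, the upcall runs from the
 * interpret callback once the OST reply is processed:
 *
 *	static int my_setattr_upcall(void *cookie, int rc)
 *	{
 *		complete((struct completion *)cookie);
 *		return rc;
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	oa->o_valid |= OBD_MD_FLGROUP | OBD_MD_FLMTIME;
 *	oa->o_mtime = ktime_get_real_seconds();
 *	rc = osc_setattr_async(exp, oa, my_setattr_upcall, &done, rqset);
 */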

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for a response. The upcall and cookie may
 * also be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}
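
/*
 * Minimal caller sketch (assumes the struct ladvise_hdr/lu_ladvise layout
 * from lustre_user.h; hypothetical, not a verbatim in-tree caller):
 *
 *	hdr->lah_count = 1;
 *	hdr->lah_advise[0].lla_advice = LU_LADVISE_WILLNEED;
 *	hdr->lah_advise[0].lla_start = 0;
 *	hdr->lah_advise[0].lla_end = length;
 *	rc = osc_ladvise_base(exp, oa, hdr, NULL, NULL, NULL);
 *
 * With rqset == NULL as above, the RPC is fire-and-forget via ptlrpcd.
 */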

static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked when the request finishes
 * @cookie:     Opaque cookie passed back to @upcall
 * @mode:       Operation done on the given range
 *
 * Only block allocation (standard preallocation) is supported currently;
 * other mode flags are not supported yet. ftruncate(2) and truncate(2)
 * are handled via a SETATTR request instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(imp, &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);
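
/*
 * Caller-side sketch of the mode restriction documented above (hedged;
 * the authoritative check sits in the llite fallocate entry point):
 *
 *	if (mode & ~FALLOC_FL_KEEP_SIZE)	// only plain preallocation
 *		return -EOPNOTSUPP;
 *	rc = osc_fallocate_base(exp, oa, upcall, cookie, mode);
 */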

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes it from the case when ELC is not supported at
         * all, where we still want to cancel locks in advance and just
         * cancel them locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

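/*
 * Throttle destroy RPCs: atomically reserve a slot against
 * cl_max_rpcs_in_flight. Returns 1 if the caller may send now, 0 if it
 * must wait on cl_destroy_waitq. The dec-and-recheck below handles the
 * race where another destroy completed between our inc and dec, in which
 * case a waiter is woken rather than left hanging.
 */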
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight.
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and atomic_inc() are not covered by a
                 * lock, thus they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}
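
/*
 * Worked example for the o_undirty sizing above (assuming 4 KiB pages):
 * with cl_max_pages_per_rpc = 256 (1 MiB RPCs) and cl_max_rpcs_in_flight
 * = 8, nrpages = 256 * (8 + 1) = 2304 pages (unless cl_dirty_max_pages is
 * larger), i.e. about 9 MiB of undirty grant is requested, plus the
 * per-extent tax when GRANT_PARAM is negotiated.
 */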

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}
EXPORT_SYMBOL(osc_update_next_shrink);

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}
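
/*
 * Example of the two-step shrink above: with 1 MiB RPCs and 8 RPCs in
 * flight, the first shrink targets (8 + 1) * 1 MiB = 9 MiB; once
 * cl_avail_grant is already at or below that, the next shrink targets a
 * single RPC's worth (1 MiB).
 */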

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}
EXPORT_SYMBOL(osc_schedule_grant_work);

/**
 * Start the grant work for returning grant to the server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state has
         * already left the EVICTED state, then cl_dirty_pages must be 0
         * already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is that it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
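/*
 * Example, assuming 4096-byte brw_pages: a 2-page read that returns
 * nob_read = 5000 keeps page 0 intact, keeps the first 904 bytes of
 * page 1, and zero-fills its remaining 3192 bytes.
 */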
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

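/*
 * Two brw_pages can be merged into one niobuf only when their flags are
 * identical and their byte ranges are contiguous; the mask below merely
 * controls which flag differences are unexpected enough to warn about.
 */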
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC |
                                  OBD_BRW_SYS_RESOURCE);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum, bool resend)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        CDEBUG(D_PAGE | (resend ? D_HA : 0),
               "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
               guard_number, resend, nob, pg_count);

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The number of guard tags left should be enough to hold
                 * the checksums of a whole page.
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (unlikely(resend))
                        CDEBUG(D_PAGE | D_HA,
                               "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
                               i, used, pga[i]->off & ~PAGE_MASK, count,
                               (int)(used * sizeof(*guard_start)),
                               guard_start + used_number);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending, we only compute a wrong checksum instead of
         * corrupting the data, so it is still correct on a resend */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending, we only compute a wrong checksum instead of
         * corrupting the data, so it is still correct on a resend */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

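/*
 * Checksum dispatch: T10-PI checksum types map to a (csum_fn, sector_size)
 * pair via obd_t10_cksum2dif() and take the T10-PI path above; all other
 * types (e.g. adler, crc32, crc32c) are hashed page-by-page in
 * osc_checksum_bulk().
 */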
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum, bool resend)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum,
                                             resend);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}

static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                     u32 page_count, struct brw_page **pga,
                     struct ptlrpc_request **reqp, int resend)
{
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc, short_io_size = 0;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;
        struct brw_page *pg_prev;
        void *short_io_buf;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;
        struct inode *inode = NULL;
        bool directio = false;
        bool enable_checksum = true;

        ENTRY;
        if (pga[0]->pg) {
                inode = page2inode(pga[0]->pg);
                if (inode == NULL) {
                        /* Try to get reference to inode from cl_page if we are
                         * dealing with direct IO, as handled pages are not
                         * actual page cache pages.
                         */
                        struct osc_async_page *oap = brw_page2oap(pga[0]);
                        struct cl_page *clpage = oap2cl_page(oap);

                        inode = clpage->cp_inode;
                        if (inode)
                                directio = true;
                }
        }
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                osc_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
                for (i = 0; i < page_count; i++) {
                        struct brw_page *pg = pga[i];
                        struct page *data_page = NULL;
                        bool retried = false;
                        bool lockedbymyself;
                        u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
                        struct address_space *map_orig = NULL;
                        pgoff_t index_orig;

retry_encrypt:
                        if (nunits & ~LUSTRE_ENCRYPTION_MASK)
                                nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
                                        LUSTRE_ENCRYPTION_UNIT_SIZE;
1458                         /* The page can already be locked when we arrive here.
1459                          * This is possible when cl_page_assume/vvp_page_assume
1460                          * is stuck on wait_on_page_writeback with page lock
1461                          * held. In this case there is no risk of the lock
1462                          * being released while we are doing our encryption
1463                          * processing, because writeback against that page will
1464                          * end in vvp_page_completion_write/cl_page_completion,
1465                          * i.e. only once the page has been fully processed.
1466                          */
1467                         lockedbymyself = trylock_page(pg->pg);
1468                         if (directio) {
1469                                 map_orig = pg->pg->mapping;
1470                                 pg->pg->mapping = inode->i_mapping;
1471                                 index_orig = pg->pg->index;
1472                                 pg->pg->index = pg->off >> PAGE_SHIFT;
1473                         }
1474                         data_page =
1475                                 llcrypt_encrypt_pagecache_blocks(pg->pg,
1476                                                                  nunits, 0,
1477                                                                  GFP_NOFS);
1478                         if (directio) {
1479                                 pg->pg->mapping = map_orig;
1480                                 pg->pg->index = index_orig;
1481                         }
1482                         if (lockedbymyself)
1483                                 unlock_page(pg->pg);
1484                         if (IS_ERR(data_page)) {
1485                                 rc = PTR_ERR(data_page);
1486                                 if (rc == -ENOMEM && !retried) {
1487                                         retried = true;
1488                                         rc = 0;
1489                                         goto retry_encrypt;
1490                                 }
1491                                 ptlrpc_request_free(req);
1492                                 RETURN(rc);
1493                         }
1494                         /* Set PageChecked flag on bounce page for
1495                          * disambiguation in osc_release_bounce_pages().
1496                          */
1497                         SetPageChecked(data_page);
1498                         pg->pg = data_page;
1499                         /* there should be no gap in the middle of page array */
1500                         if (i == page_count - 1) {
1501                                 struct osc_async_page *oap = brw_page2oap(pg);
1502
1503                                 oa->o_size = oap->oap_count +
1504                                         oap->oap_obj_off + oap->oap_page_off;
1505                         }
1506                         /* len is forced to nunits, and the relative offset
1507                          * to 0, so store the old clear-text info
1508                          */
1509                         pg->bp_count_diff = nunits - pg->count;
1510                         pg->count = nunits;
1511                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1512                         pg->off = pg->off & PAGE_MASK;
1513                 }
1514         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
1515                 for (i = 0; i < page_count; i++) {
1516                         struct brw_page *pg = pga[i];
1517                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1518
1519                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1520                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1521                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1522                         /* count/off are forced to cover the whole encryption
1523                          * unit size so that all encrypted data is stored on the
1524                          * OST; adjust bp_{count,off}_diff for the size of
1525                          * the clear text.
1526                          */
1527                         pg->bp_count_diff = nunits - pg->count;
1528                         pg->count = nunits;
1529                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1530                         pg->off = pg->off & PAGE_MASK;
1531                 }
1532         }
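        /*
         * Worked example (assuming 4 KiB pages and a 4 KiB
         * LUSTRE_ENCRYPTION_UNIT_SIZE): a clear-text fragment with
         * pg->off = 5120 and pg->count = 512 gives nunits = 1024 + 512,
         * rounded up to 4096.  The RPC then carries off = 4096 and
         * count = 4096, with bp_off_diff = 1024 and bp_count_diff = 3584
         * recorded so the original extent [5120, 5632) can be restored
         * once the RPC completes (see osc_release_bounce_pages()).
         */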
1533
1534         for (niocount = i = 1; i < page_count; i++) {
1535                 if (!can_merge_pages(pga[i - 1], pga[i]))
1536                         niocount++;
1537         }
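        /*
         * Example (assuming 4 KiB pages, and that can_merge_pages() merges
         * adjacent, flag-compatible pages): three whole pages at file offsets
         * 0, 4096 and 8192 collapse into one niobuf_remote covering 12288
         * bytes, so niocount stays 1; a hole before the third page would
         * instead give niocount = 2.
         */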
1538
1539         pill = &req->rq_pill;
1540         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1541                              sizeof(*ioobj));
1542         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1543                              niocount * sizeof(*niobuf));
1544
1545         for (i = 0; i < page_count; i++) {
1546                 short_io_size += pga[i]->count;
1547                 if (!inode || !IS_ENCRYPTED(inode)) {
1548                         pga[i]->bp_count_diff = 0;
1549                         pga[i]->bp_off_diff = 0;
1550                 }
1551         }
1552
1553         if (lnet_is_rdma_only_page(pga[0]->pg)) {
1554                 enable_checksum = false;
1555                 short_io_size = 0;
1556         }
1557
1558         /* Check if read/write is small enough to be a short io. */
1559         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1560             !imp_connect_shortio(cli->cl_import))
1561                 short_io_size = 0;
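        /*
         * A short io inlines the data into the request (write) or reply
         * (read) buffer itself, avoiding a separate bulk transfer for a
         * tiny, single-niobuf I/O.  It is only attempted when the server
         * advertised support at connect time (imp_connect_shortio()).
         */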
1562
1563         /* If this is an empty RPC to an old server, just ignore it */
1564         if (!short_io_size && !pga[0]->pg) {
1565                 ptlrpc_request_free(req);
1566                 RETURN(-ENODATA);
1567         }
1568
1569         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1570                              opc == OST_READ ? 0 : short_io_size);
1571         if (opc == OST_READ)
1572                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1573                                      short_io_size);
1574
1575         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1576         if (rc) {
1577                 ptlrpc_request_free(req);
1578                 RETURN(rc);
1579         }
1580         osc_set_io_portal(req);
1581
1582         ptlrpc_at_set_req_timeout(req);
1583         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1584          * retry logic */
1585         req->rq_no_retry_einprogress = 1;
1586
1587         if (short_io_size != 0) {
1588                 desc = NULL;
1589                 short_io_buf = NULL;
1590                 goto no_bulk;
1591         }
1592
1593         desc = ptlrpc_prep_bulk_imp(req, page_count,
1594                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1595                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1596                         PTLRPC_BULK_PUT_SINK),
1597                 OST_BULK_PORTAL,
1598                 &ptlrpc_bulk_kiov_pin_ops);
1599
1600         if (desc == NULL)
1601                 GOTO(out, rc = -ENOMEM);
1602         /* NB the request now owns desc and frees it when the request is freed */
1603 no_bulk:
1604         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1605         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1606         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1607         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1608
1609         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1610
1611         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1612          * and from_kgid(), because these operations are asynchronous. Fortunately,
1613          * the oa variable already contains valid o_uid and o_gid for these two
1614          * operations. Besides, filling o_uid and o_gid is enough for nrs-tbf,
1615          * see LU-9658. OBD_MD_FLUID and OBD_MD_FLGID are not set, to avoid
1616          * breaking other process logic */
1617         body->oa.o_uid = oa->o_uid;
1618         body->oa.o_gid = oa->o_gid;
1619
1620         obdo_to_ioobj(oa, ioobj);
1621         ioobj->ioo_bufcnt = niocount;
1622         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1623          * of bulks that might be sent for this request.  The actual number is
1624          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1625          * sends "max - 1", both for compatibility with old clients sending "0"
1626          * and so that the actual maximum is a power-of-two number, not one less. LU-1431 */
1627         if (desc != NULL)
1628                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1629         else /* short io */
1630                 ioobj_max_brw_set(ioobj, 0);
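        /*
         * E.g. with desc->bd_md_max_brw == 4 the high bits carry "3" per the
         * "max - 1" convention above; see ioobj_max_brw_set() for the exact
         * wire encoding.
         */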
1631
1632         if (short_io_size != 0) {
1633                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1634                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1635                         body->oa.o_flags = 0;
1636                 }
1637                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1638                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1639                        short_io_size);
1640                 if (opc == OST_WRITE) {
1641                         short_io_buf = req_capsule_client_get(pill,
1642                                                               &RMF_SHORT_IO);
1643                         LASSERT(short_io_buf != NULL);
1644                 }
1645         }
1646
1647         LASSERT(page_count > 0);
1648         pg_prev = pga[0];
1649         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1650                 struct brw_page *pg = pga[i];
1651                 int poff = pg->off & ~PAGE_MASK;
1652
1653                 LASSERT(pg->count > 0);
1654                 /* make sure there is no gap in the middle of page array */
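                /* That is: for a multi-page RPC the first page must run to
                 * the end of its page, every middle page must be a whole
                 * page, and the last page must start at offset 0 -- exactly
                 * what the ergo() clauses below spell out. */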
1655                 LASSERTF(page_count == 1 ||
1656                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1657                           ergo(i > 0 && i < page_count - 1,
1658                                poff == 0 && pg->count == PAGE_SIZE)   &&
1659                           ergo(i == page_count - 1, poff == 0)),
1660                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1661                          i, page_count, pg, pg->off, pg->count);
1662                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1663                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1664                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1665                          i, page_count,
1666                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1667                          pg_prev->pg, page_private(pg_prev->pg),
1668                          pg_prev->pg->index, pg_prev->off);
1669                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1670                         (pg->flag & OBD_BRW_SRVLOCK));
1671                 if (short_io_size != 0 && opc == OST_WRITE) {
1672                         unsigned char *ptr = kmap_atomic(pg->pg);
1673
1674                         LASSERT(short_io_size >= requested_nob + pg->count);
1675                         memcpy(short_io_buf + requested_nob,
1676                                ptr + poff,
1677                                pg->count);
1678                         kunmap_atomic(ptr);
1679                 } else if (short_io_size == 0) {
1680                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1681                                                          pg->count);
1682                 }
1683                 requested_nob += pg->count;
1684
1685                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1686                         niobuf--;
1687                         niobuf->rnb_len += pg->count;
1688                 } else {
1689                         niobuf->rnb_offset = pg->off;
1690                         niobuf->rnb_len    = pg->count;
1691                         niobuf->rnb_flags  = pg->flag;
1692                 }
1693                 pg_prev = pg;
1694         }
1695
1696         LASSERTF((void *)(niobuf - niocount) ==
1697                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1698                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1699                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1700
1701         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1702         if (resend) {
1703                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1704                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1705                         body->oa.o_flags = 0;
1706                 }
1707                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1708         }
1709
1710         if (osc_should_shrink_grant(cli))
1711                 osc_shrink_grant_local(cli, &body->oa);
1712
1713         if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
1714                 enable_checksum = false;
1715
1716         /* size[REQ_REC_OFF] still sizeof (*body) */
1717         if (opc == OST_WRITE) {
1718                 if (enable_checksum) {
1719                         /* store cl_cksum_type in a local variable since
1720                          * it can be changed via lprocfs */
1721                         enum cksum_types cksum_type = cli->cl_cksum_type;
1722
1723                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1724                                 body->oa.o_flags = 0;
1725
1726                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1727                                                                 cksum_type);
1728                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1729
1730                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1731                                                   requested_nob, page_count,
1732                                                   pga, OST_WRITE,
1733                                                   &body->oa.o_cksum, resend);
1734                         if (rc < 0) {
1735                                 CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
1736                                        rc);
1737                                 GOTO(out, rc);
1738                         }
1739                         CDEBUG(D_PAGE | (resend ? D_HA : 0),
1740                                "checksum at write origin: %x (%x)\n",
1741                                body->oa.o_cksum, cksum_type);
1742
1743                         /* save this in 'oa', too, for later checking */
1744                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1745                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1746                                                            cksum_type);
1747                 } else {
1748                         /* clear out the checksum flag, in case this is a
1749                          * resend but cl_checksum is no longer set. b=11238 */
1750                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1751                 }
1752                 oa->o_cksum = body->oa.o_cksum;
1753                 /* 1 RC per niobuf */
1754                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1755                                      sizeof(__u32) * niocount);
1756         } else {
1757                 if (enable_checksum) {
1758                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1759                                 body->oa.o_flags = 0;
1760                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1761                                 cli->cl_cksum_type);
1762                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1763                 }
1764
1765                 /* The client cksum has already been copied to the wire obdo in
1766                  * the previous lustre_set_wire_obdo(); in case a bulk-read is
1767                  * being resent due to a cksum error, this allows the server to
1768                  * check+dump the pages on its side */
1769         }
1770         ptlrpc_request_set_replen(req);
1771
1772         aa = ptlrpc_req_async_args(aa, req);
1773         aa->aa_oa = oa;
1774         aa->aa_requested_nob = requested_nob;
1775         aa->aa_nio_count = niocount;
1776         aa->aa_page_count = page_count;
1777         aa->aa_resends = 0;
1778         aa->aa_ppga = pga;
1779         aa->aa_cli = cli;
1780         INIT_LIST_HEAD(&aa->aa_oaps);
1781
1782         *reqp = req;
1783         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1784         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1785                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1786                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1787         RETURN(0);
1788
1789  out:
1790         ptlrpc_req_finished(req);
1791         RETURN(rc);
1792 }
1793
1794 char dbgcksum_file_name[PATH_MAX];
1795
1796 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1797                                 struct brw_page **pga, __u32 server_cksum,
1798                                 __u32 client_cksum)
1799 {
1800         struct file *filp;
1801         int rc, i;
1802         unsigned int len;
1803         char *buf;
1804
1805         /* only keep a dump of pages on the first error for the same range in
1806          * the file/fid, not during resends/retries. */
1807         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1808                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1809                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1810                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1811                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1812                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1813                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1814                  pga[0]->off,
1815                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1816                  client_cksum, server_cksum);
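        /*
         * With the default debug path this produces a name like (values
         * purely illustrative):
         * /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x1:0x0]:[0-1048575]-c0ffee11-deadbeef
         */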
1817         CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1818         filp = filp_open(dbgcksum_file_name,
1819                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1820         if (IS_ERR(filp)) {
1821                 rc = PTR_ERR(filp);
1822                 if (rc == -EEXIST)
1823                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1824                                "checksum error: rc = %d\n", dbgcksum_file_name,
1825                                rc);
1826                 else
1827                         CERROR("%s: can't open to dump pages with checksum "
1828                                "error: rc = %d\n", dbgcksum_file_name, rc);
1829                 return;
1830         }
1831
1832         for (i = 0; i < page_count; i++) {
1833                 len = pga[i]->count;
1834                 buf = kmap(pga[i]->pg);
1835                 while (len != 0) {
1836                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1837                         if (rc < 0) {
1838                                 CERROR("%s: wanted to write %u but got %d "
1839                                        "error\n", dbgcksum_file_name, len, rc);
1840                                 break;
1841                         }
1842                         len -= rc;
1843                         buf += rc;
1844                 }
1845                 kunmap(pga[i]->pg);
1846         }
1847
1848         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1849         if (rc)
1850                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1851         filp_close(filp, NULL);
1852
1853         libcfs_debug_dumplog();
1854 }
1855
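/* Returns 0 when the server checksum matches the client one, and 1 on a
 * genuine mismatch; the caller (osc_brw_fini_request()) maps a nonzero
 * return to -EAGAIN so the write is resent. */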
1856 static int
1857 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1858                      __u32 client_cksum, __u32 server_cksum,
1859                      struct osc_brw_async_args *aa)
1860 {
1861         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1862         enum cksum_types cksum_type;
1863         obd_dif_csum_fn *fn = NULL;
1864         int sector_size = 0;
1865         __u32 new_cksum;
1866         char *msg;
1867         int rc;
1868
1869         if (server_cksum == client_cksum) {
1870                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1871                 return 0;
1872         }
1873
1874         if (aa->aa_cli->cl_checksum_dump)
1875                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1876                                     server_cksum, client_cksum);
1877
1878         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1879                                            oa->o_flags : 0);
1880
1881         switch (cksum_type) {
1882         case OBD_CKSUM_T10IP512:
1883                 fn = obd_dif_ip_fn;
1884                 sector_size = 512;
1885                 break;
1886         case OBD_CKSUM_T10IP4K:
1887                 fn = obd_dif_ip_fn;
1888                 sector_size = 4096;
1889                 break;
1890         case OBD_CKSUM_T10CRC512:
1891                 fn = obd_dif_crc_fn;
1892                 sector_size = 512;
1893                 break;
1894         case OBD_CKSUM_T10CRC4K:
1895                 fn = obd_dif_crc_fn;
1896                 sector_size = 4096;
1897                 break;
1898         default:
1899                 break;
1900         }
1901
1902         if (fn)
1903                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1904                                              aa->aa_page_count, aa->aa_ppga,
1905                                              OST_WRITE, fn, sector_size,
1906                                              &new_cksum, true);
1907         else
1908                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1909                                        aa->aa_ppga, OST_WRITE, cksum_type,
1910                                        &new_cksum);
1911
1912         if (rc < 0)
1913                 msg = "failed to calculate the client write checksum";
1914         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1915                 msg = "the server did not use the checksum type specified in "
1916                       "the original request - likely a protocol problem";
1917         else if (new_cksum == server_cksum)
1918                 msg = "changed on the client after we checksummed it - "
1919                       "likely false positive due to mmap IO (bug 11742)";
1920         else if (new_cksum == client_cksum)
1921                 msg = "changed in transit before arrival at OST";
1922         else
1923                 msg = "changed in transit AND doesn't match the original - "
1924                       "likely false positive due to mmap IO (bug 11742)";
1925
1926         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1927                            DFID " object "DOSTID" extent [%llu-%llu], original "
1928                            "client csum %x (type %x), server csum %x (type %x),"
1929                            " client csum now %x\n",
1930                            obd_name, msg, libcfs_nid2str(peer->nid),
1931                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1932                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1933                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1934                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1935                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1936                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1937                            client_cksum,
1938                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1939                            server_cksum, cksum_type, new_cksum);
1940         return 1;
1941 }
1942
1943 /* Note rc enters this function as number of bytes transferred */
1944 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1945 {
1946         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1947         struct client_obd *cli = aa->aa_cli;
1948         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1949         const struct lnet_process_id *peer =
1950                 &req->rq_import->imp_connection->c_peer;
1951         struct ost_body *body;
1952         u32 client_cksum = 0;
1953         struct inode *inode;
1954         unsigned int blockbits = 0, blocksize = 0;
1955
1956         ENTRY;
1957
1958         if (rc < 0 && rc != -EDQUOT) {
1959                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1960                 RETURN(rc);
1961         }
1962
1963         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1964         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1965         if (body == NULL) {
1966                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1967                 RETURN(-EPROTO);
1968         }
1969
1970         /* set/clear over quota flag for a uid/gid/projid */
1971         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1972             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1973                 unsigned qid[LL_MAXQUOTAS] = {
1974                                          body->oa.o_uid, body->oa.o_gid,
1975                                          body->oa.o_projid };
1976                 CDEBUG(D_QUOTA,
1977                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1978                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1979                        body->oa.o_valid, body->oa.o_flags);
1980                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1981                                 body->oa.o_flags);
1982         }
1983
1984         osc_update_grant(cli, body);
1985
1986         if (rc < 0)
1987                 RETURN(rc);
1988
1989         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1990                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1991
1992         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1993                 if (rc > 0) {
1994                         CERROR("%s: unexpected positive size %d\n",
1995                                obd_name, rc);
1996                         RETURN(-EPROTO);
1997                 }
1998
1999                 if (req->rq_bulk != NULL &&
2000                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
2001                         RETURN(-EAGAIN);
2002
2003                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
2004                     check_write_checksum(&body->oa, peer, client_cksum,
2005                                          body->oa.o_cksum, aa))
2006                         RETURN(-EAGAIN);
2007
2008                 rc = check_write_rcs(req, aa->aa_requested_nob,
2009                                      aa->aa_nio_count, aa->aa_page_count,
2010                                      aa->aa_ppga);
2011                 GOTO(out, rc);
2012         }
2013
2014         /* The rest of this function executes only for OST_READs */
2015
2016         if (req->rq_bulk == NULL) {
2017                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
2018                                           RCL_SERVER);
2019                 LASSERT(rc == req->rq_status);
2020         } else {
2021                 /* if unwrap_bulk failed, return -EAGAIN to retry */
2022                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
2023         }
2024         if (rc < 0)
2025                 GOTO(out, rc = -EAGAIN);
2026
2027         if (rc > aa->aa_requested_nob) {
2028                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
2029                        rc, aa->aa_requested_nob);
2030                 RETURN(-EPROTO);
2031         }
2032
2033         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2034                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2035                        rc, req->rq_bulk->bd_nob_transferred);
2036                 RETURN(-EPROTO);
2037         }
2038
2039         if (req->rq_bulk == NULL) {
2040                 /* short io */
2041                 int nob, pg_count, i = 0;
2042                 unsigned char *buf;
2043
2044                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2045                 pg_count = aa->aa_page_count;
2046                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2047                                                    rc);
2048                 nob = rc;
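                /* Unpack the inlined reply: the server packed the page
                 * payloads back-to-back, mirroring the memcpy() into the
                 * RMF_SHORT_IO buffer done on the write path in
                 * osc_brw_prep_request(). */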
2049                 while (nob > 0 && pg_count > 0) {
2050                         unsigned char *ptr;
2051                         int count = aa->aa_ppga[i]->count > nob ?
2052                                     nob : aa->aa_ppga[i]->count;
2053
2054                         CDEBUG(D_CACHE, "page %p count %d\n",
2055                                aa->aa_ppga[i]->pg, count);
2056                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2057                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2058                                count);
2059                         kunmap_atomic((void *) ptr);
2060
2061                         buf += count;
2062                         nob -= count;
2063                         i++;
2064                         pg_count--;
2065                 }
2066         }
2067
2068         if (rc < aa->aa_requested_nob)
2069                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2070
2071         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2072                 static int cksum_counter;
2073                 u32 server_cksum = body->oa.o_cksum;
2074                 int nob = rc;
2075                 char *via = "";
2076                 char *router = "";
2077                 enum cksum_types cksum_type;
2078                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2079                         body->oa.o_flags : 0;
2080
2081                 cksum_type = obd_cksum_type_unpack(o_flags);
2082                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2083                                           aa->aa_page_count, aa->aa_ppga,
2084                                           OST_READ, &client_cksum, false);
2085                 if (rc < 0)
2086                         GOTO(out, rc);
2087
2088                 if (req->rq_bulk != NULL &&
2089                     peer->nid != req->rq_bulk->bd_sender) {
2090                         via = " via ";
2091                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2092                 }
2093
2094                 if (server_cksum != client_cksum) {
2095                         struct ost_body *clbody;
2096                         __u32 client_cksum2;
2097                         u32 page_count = aa->aa_page_count;
2098
2099                         osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2100                                              page_count, aa->aa_ppga,
2101                                              OST_READ, &client_cksum2, true);
2102                         clbody = req_capsule_client_get(&req->rq_pill,
2103                                                         &RMF_OST_BODY);
2104                         if (cli->cl_checksum_dump)
2105                                 dump_all_bulk_pages(&clbody->oa, page_count,
2106                                                     aa->aa_ppga, server_cksum,
2107                                                     client_cksum);
2108
2109                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2110                                            "%s%s%s inode "DFID" object "DOSTID
2111                                            " extent [%llu-%llu], client %x/%x, "
2112                                            "server %x, cksum_type %x\n",
2113                                            obd_name,
2114                                            libcfs_nid2str(peer->nid),
2115                                            via, router,
2116                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2117                                                 clbody->oa.o_parent_seq : 0ULL,
2118                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2119                                                 clbody->oa.o_parent_oid : 0,
2120                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2121                                                 clbody->oa.o_parent_ver : 0,
2122                                            POSTID(&body->oa.o_oi),
2123                                            aa->aa_ppga[0]->off,
2124                                            aa->aa_ppga[page_count-1]->off +
2125                                            aa->aa_ppga[page_count-1]->count - 1,
2126                                            client_cksum, client_cksum2,
2127                                            server_cksum, cksum_type);
2128                         cksum_counter = 0;
2129                         aa->aa_oa->o_cksum = client_cksum;
2130                         rc = -EAGAIN;
2131                 } else {
2132                         cksum_counter++;
2133                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2134                         rc = 0;
2135                 }
2136         } else if (unlikely(client_cksum)) {
2137                 static int cksum_missed;
2138
2139                 cksum_missed++;
2140                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2141                         CERROR("%s: checksum %u requested from %s but not sent\n",
2142                                obd_name, cksum_missed,
2143                                libcfs_nid2str(peer->nid));
2144         } else {
2145                 rc = 0;
2146         }
2147
2148         inode = page2inode(aa->aa_ppga[0]->pg);
2149         if (inode == NULL) {
2150                 /* Try to get reference to inode from cl_page if we are
2151                  * dealing with direct IO, as handled pages are not
2152                  * actual page cache pages.
2153                  */
2154                 struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
2155
2156                 inode = oap2cl_page(oap)->cp_inode;
2157                 if (inode) {
2158                         blockbits = inode->i_blkbits;
2159                         blocksize = 1 << blockbits;
2160                 }
2161         }
2162         if (inode && IS_ENCRYPTED(inode)) {
2163                 int idx;
2164
2165                 if (!llcrypt_has_encryption_key(inode)) {
2166                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2167                         GOTO(out, rc);
2168                 }
2169                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2170                         struct brw_page *pg = aa->aa_ppga[idx];
2171                         unsigned int offs = 0;
2172
2173                         while (offs < PAGE_SIZE) {
2174                                 /* do not decrypt if page is all 0s */
2175                                 if (memchr_inv(page_address(pg->pg) + offs, 0,
2176                                          LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2177                                         /* if the page is empty, forward this
2178                                          * info to upper layers (ll_io_zero_page)
2179                                          * by clearing PagePrivate2
2180                                          */
2181                                         if (!offs)
2182                                                 ClearPagePrivate2(pg->pg);
2183                                         break;
2184                                 }
2185
2186                                 if (blockbits) {
2187                                         /* This is the direct IO case. Directly
2188                                          * call the decrypt function that takes
2189                                          * the inode as an input parameter. The
2190                                          * page does not need to be locked.
2191                                          */
2192                                         u64 lblk_num =
2193                                                 ((u64)(pg->off >> PAGE_SHIFT) <<
2194                                                      (PAGE_SHIFT - blockbits)) +
2195                                                        (offs >> blockbits);
2196                                         unsigned int i;
2197
2198                                         for (i = offs;
2199                                              i < offs +
2200                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2201                                              i += blocksize, lblk_num++) {
2202                                                 rc =
2203                                                   llcrypt_decrypt_block_inplace(
2204                                                           inode, pg->pg,
2205                                                           blocksize, i,
2206                                                           lblk_num);
2207                                                 if (rc)
2208                                                         break;
2209                                         }
2210                                 } else {
2211                                         rc = llcrypt_decrypt_pagecache_blocks(
2212                                                 pg->pg,
2213                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2214                                                 offs);
2215                                 }
2216                                 if (rc)
2217                                         GOTO(out, rc);
2218
2219                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2220                         }
2221                 }
2222         }
2223
2224 out:
2225         if (rc >= 0)
2226                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2227                                      aa->aa_oa, &body->oa);
2228
2229         RETURN(rc);
2230 }
2231
2232 static int osc_brw_redo_request(struct ptlrpc_request *request,
2233                                 struct osc_brw_async_args *aa, int rc)
2234 {
2235         struct ptlrpc_request *new_req;
2236         struct osc_brw_async_args *new_aa;
2237         struct osc_async_page *oap;
2238         ENTRY;
2239
2240         /* The message below is checked in replay-ost-single.sh test_8ae */
2241         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2242                   "redo for recoverable error %d", rc);
2243
2244         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2245                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2246                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2247                                   aa->aa_ppga, &new_req, 1);
2248         if (rc)
2249                 RETURN(rc);
2250
2251         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2252                 if (oap->oap_request != NULL) {
2253                         LASSERTF(request == oap->oap_request,
2254                                  "request %p != oap_request %p\n",
2255                                  request, oap->oap_request);
2256                 }
2257         }
2258         /*
2259          * New request takes over pga and oaps from old request.
2260          * Note that copying a list_head doesn't work, need to move it...
2261          */
2262         aa->aa_resends++;
2263         new_req->rq_interpret_reply = request->rq_interpret_reply;
2264         new_req->rq_async_args = request->rq_async_args;
2265         new_req->rq_commit_cb = request->rq_commit_cb;
2266         /* cap the resend delay to the current request timeout; this is similar to
2267          * what ptlrpc does (see after_reply()) */
2268         if (aa->aa_resends > new_req->rq_timeout)
2269                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2270         else
2271                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
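        /* i.e. the Nth resend is deferred by N seconds, capped at the request
         * timeout: resend 3 waits 3s, while resend 100 of a request with a
         * 30s timeout waits only 30s. */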
2272         new_req->rq_generation_set = 1;
2273         new_req->rq_import_generation = request->rq_import_generation;
2274
2275         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2276
2277         INIT_LIST_HEAD(&new_aa->aa_oaps);
2278         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2279         INIT_LIST_HEAD(&new_aa->aa_exts);
2280         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2281         new_aa->aa_resends = aa->aa_resends;
2282
2283         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2284                 if (oap->oap_request) {
2285                         ptlrpc_req_finished(oap->oap_request);
2286                         oap->oap_request = ptlrpc_request_addref(new_req);
2287                 }
2288         }
2289
2290         /* XXX: This code will run into problems if we ever support adding
2291          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2292          * waiting for all of them to finish. We should inherit the request
2293          * set from the old request. */
2294         ptlrpcd_add_req(new_req);
2295
2296         DEBUG_REQ(D_INFO, new_req, "new request");
2297         RETURN(0);
2298 }
2299
2300 /*
2301  * ugh, we want disk allocation on the target to happen in offset order.  We'll
2302  * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll do
2303  * fine for our small page arrays and doesn't require allocation.  It's an
2304  * insertion sort that swaps elements that are strides apart, shrinking the
2305  * stride down until it's '1' and the array is sorted.
2306  */
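/* For example, with num == 100 the first loop below grows the stride through
 * 1, 4, 13, 40, 121 (the 3x+1 gap sequence), and the do/while then sorts with
 * strides 40, 13, 4 and finally 1. */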
2307 static void sort_brw_pages(struct brw_page **array, int num)
2308 {
2309         int stride, i, j;
2310         struct brw_page *tmp;
2311
2312         if (num == 1)
2313                 return;
2314         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2315                 ;
2316
2317         do {
2318                 stride /= 3;
2319                 for (i = stride ; i < num ; i++) {
2320                         tmp = array[i];
2321                         j = i;
2322                         while (j >= stride && array[j - stride]->off > tmp->off) {
2323                                 array[j] = array[j - stride];
2324                                 j -= stride;
2325                         }
2326                         array[j] = tmp;
2327                 }
2328         } while (stride > 1);
2329 }
2330
2331 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2332 {
2333         LASSERT(ppga != NULL);
2334         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2335 }
2336
2337 static int brw_interpret(const struct lu_env *env,
2338                          struct ptlrpc_request *req, void *args, int rc)
2339 {
2340         struct osc_brw_async_args *aa = args;
2341         struct osc_extent *ext;
2342         struct osc_extent *tmp;
2343         struct client_obd *cli = aa->aa_cli;
2344         unsigned long transferred = 0;
2345
2346         ENTRY;
2347
2348         rc = osc_brw_fini_request(req, rc);
2349         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2350
2351         /* restore clear text pages */
2352         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2353
2354         /*
2355          * When server returns -EINPROGRESS, client should always retry
2356          * regardless of the number of times the bulk was resent already.
2357          */
2358         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2359                 if (req->rq_import_generation !=
2360                     req->rq_import->imp_generation) {
2361                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2362                                ""DOSTID", rc = %d.\n",
2363                                req->rq_import->imp_obd->obd_name,
2364                                POSTID(&aa->aa_oa->o_oi), rc);
2365                 } else if (rc == -EINPROGRESS ||
2366                            client_should_resend(aa->aa_resends, aa->aa_cli)) {
2367                         rc = osc_brw_redo_request(req, aa, rc);
2368                 } else {
2369                         CERROR("%s: too many resent retries for object: "
2370                                "%llu:%llu, rc = %d.\n",
2371                                req->rq_import->imp_obd->obd_name,
2372                                POSTID(&aa->aa_oa->o_oi), rc);
2373                 }
2374
2375                 if (rc == 0)
2376                         RETURN(0);
2377                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2378                         rc = -EIO;
2379         }
2380
2381         if (rc == 0) {
2382                 struct obdo *oa = aa->aa_oa;
2383                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2384                 unsigned long valid = 0;
2385                 struct cl_object *obj;
2386                 struct osc_async_page *last;
2387
2388                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2389                 obj = osc2cl(last->oap_obj);
2390
2391                 cl_object_attr_lock(obj);
2392                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2393                         attr->cat_blocks = oa->o_blocks;
2394                         valid |= CAT_BLOCKS;
2395                 }
2396                 if (oa->o_valid & OBD_MD_FLMTIME) {
2397                         attr->cat_mtime = oa->o_mtime;
2398                         valid |= CAT_MTIME;
2399                 }
2400                 if (oa->o_valid & OBD_MD_FLATIME) {
2401                         attr->cat_atime = oa->o_atime;
2402                         valid |= CAT_ATIME;
2403                 }
2404                 if (oa->o_valid & OBD_MD_FLCTIME) {
2405                         attr->cat_ctime = oa->o_ctime;
2406                         valid |= CAT_CTIME;
2407                 }
2408
2409                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2410                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2411                         loff_t last_off = last->oap_count + last->oap_obj_off +
2412                                 last->oap_page_off;
2413
2414                         /* Change the file size if this is an out-of-quota or
2415                          * direct IO write and it extends the file size */
2416                         if (loi->loi_lvb.lvb_size < last_off) {
2417                                 attr->cat_size = last_off;
2418                                 valid |= CAT_SIZE;
2419                         }
2420                         /* Extend KMS if it's not a lockless write */
2421                         if (loi->loi_kms < last_off &&
2422                             oap2osc_page(last)->ops_srvlock == 0) {
2423                                 attr->cat_kms = last_off;
2424                                 valid |= CAT_KMS;
2425                         }
2426                 }
2427
2428                 if (valid != 0)
2429                         cl_object_attr_update(env, obj, attr, valid);
2430                 cl_object_attr_unlock(obj);
2431         }
2432         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2433         aa->aa_oa = NULL;
2434
2435         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2436                 osc_inc_unstable_pages(req);
2437
2438         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2439                 list_del_init(&ext->oe_link);
2440                 osc_extent_finish(env, ext, 1,
2441                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2442         }
2443         LASSERT(list_empty(&aa->aa_exts));
2444         LASSERT(list_empty(&aa->aa_oaps));
2445
2446         transferred = (req->rq_bulk == NULL ? /* short io */
2447                        aa->aa_requested_nob :
2448                        req->rq_bulk->bd_nob_transferred);
2449
2450         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2451         ptlrpc_lprocfs_brw(req, transferred);
2452
2453         spin_lock(&cli->cl_loi_list_lock);
2454         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2455          * is called so we know whether to go to sync BRWs or wait for more
2456          * RPCs to complete */
2457         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2458                 cli->cl_w_in_flight--;
2459         else
2460                 cli->cl_r_in_flight--;
2461         osc_wake_cache_waiters(cli);
2462         spin_unlock(&cli->cl_loi_list_lock);
2463
2464         osc_io_unplug(env, cli, NULL);
2465         RETURN(rc);
2466 }
2467
2468 static void brw_commit(struct ptlrpc_request *req)
2469 {
2470         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2471          * this function, called via rq_commit_cb, we need to ensure
2472          * osc_dec_unstable_pages is still called. Otherwise unstable
2473          * pages may be leaked. */
2474         spin_lock(&req->rq_lock);
2475         if (likely(req->rq_unstable)) {
2476                 req->rq_unstable = 0;
2477                 spin_unlock(&req->rq_lock);
2478
2479                 osc_dec_unstable_pages(req);
2480         } else {
2481                 req->rq_committed = 1;
2482                 spin_unlock(&req->rq_lock);
2483         }
2484 }
2485
2486 /**
2487  * Build an RPC from the list of extents @ext_list. The caller must ensure
2488  * that the total number of pages in this list is NOT over max pages per RPC.
2489  * Extents in the list must be in OES_RPC state.
2490  */
2491 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2492                   struct list_head *ext_list, int cmd)
2493 {
2494         struct ptlrpc_request           *req = NULL;
2495         struct osc_extent               *ext;
2496         struct brw_page                 **pga = NULL;
2497         struct osc_brw_async_args       *aa = NULL;
2498         struct obdo                     *oa = NULL;
2499         struct osc_async_page           *oap;
2500         struct osc_object               *obj = NULL;
2501         struct cl_req_attr              *crattr = NULL;
2502         loff_t                          starting_offset = OBD_OBJECT_EOF;
2503         loff_t                          ending_offset = 0;
2504         /* '1' for consistency with code that checks !mpflag to restore */
2505         int mpflag = 1;
2506         int                             mem_tight = 0;
2507         int                             page_count = 0;
2508         bool                            soft_sync = false;
2509         bool                            ndelay = false;
2510         int                             i;
2511         int                             grant = 0;
2512         int                             rc;
2513         __u32                           layout_version = 0;
2514         LIST_HEAD(rpc_list);
2515         struct ost_body                 *body;
2516         ENTRY;
2517         LASSERT(!list_empty(ext_list));
2518
2519         /* add pages into rpc_list to build BRW rpc */
2520         list_for_each_entry(ext, ext_list, oe_link) {
2521                 LASSERT(ext->oe_state == OES_RPC);
2522                 mem_tight |= ext->oe_memalloc;
2523                 grant += ext->oe_grants;
2524                 page_count += ext->oe_nr_pages;
2525                 layout_version = max(layout_version, ext->oe_layout_version);
2526                 if (obj == NULL)
2527                         obj = ext->oe_obj;
2528         }
2529
2530         soft_sync = osc_over_unstable_soft_limit(cli);
2531         if (mem_tight)
2532                 mpflag = memalloc_noreclaim_save();
2533
2534         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2535         if (pga == NULL)
2536                 GOTO(out, rc = -ENOMEM);
2537
2538         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2539         if (oa == NULL)
2540                 GOTO(out, rc = -ENOMEM);
2541
2542         i = 0;
2543         list_for_each_entry(ext, ext_list, oe_link) {
2544                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2545                         if (mem_tight)
2546                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2547                         if (soft_sync)
2548                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2549                         pga[i] = &oap->oap_brw_page;
2550                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2551                         i++;
2552
2553                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2554                         if (starting_offset == OBD_OBJECT_EOF ||
2555                             starting_offset > oap->oap_obj_off)
2556                                 starting_offset = oap->oap_obj_off;
2557                         else
2558                                 LASSERT(oap->oap_page_off == 0);
2559                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2560                                 ending_offset = oap->oap_obj_off +
2561                                                 oap->oap_count;
2562                         else
2563                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2564                                         PAGE_SIZE);
2565                 }
2566                 if (ext->oe_ndelay)
2567                         ndelay = true;
2568         }
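        /* The LASSERTs above enforce the same "no gap in the middle" layout
         * that osc_brw_prep_request() asserts per page: only the first and
         * last pages of the RPC may be partial. */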
2569
2570         /* first page in the list */
2571         oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2572
2573         crattr = &osc_env_info(env)->oti_req_attr;
2574         memset(crattr, 0, sizeof(*crattr));
2575         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2576         crattr->cra_flags = ~0ULL;
2577         crattr->cra_page = oap2cl_page(oap);
2578         crattr->cra_oa = oa;
2579         cl_req_attr_set(env, osc2cl(obj), crattr);
2580
2581         if (cmd == OBD_BRW_WRITE) {
2582                 oa->o_grant_used = grant;
2583                 if (layout_version > 0) {
2584                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2585                                PFID(&oa->o_oi.oi_fid), layout_version);
2586
2587                         oa->o_layout_version = layout_version;
2588                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2589                 }
2590         }
2591
2592         sort_brw_pages(pga, page_count);
2593         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2594         if (rc != 0) {
2595                 CERROR("prep_req failed: %d\n", rc);
2596                 GOTO(out, rc);
2597         }
2598
2599         req->rq_commit_cb = brw_commit;
2600         req->rq_interpret_reply = brw_interpret;
2601         req->rq_memalloc = mem_tight != 0;
2602         oap->oap_request = ptlrpc_request_addref(req);
2603         if (ndelay) {
2604                 req->rq_no_resend = req->rq_no_delay = 1;
2605                 /* probably set a shorter timeout value
2606                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2607                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2608         }
2609
2610         /* Need to update the timestamps after the request is built in case
2611          * we race with setattr (locally or in the queue at the OST).  If the OST
2612          * gets a later setattr before an earlier BRW (as determined by the request
2613          * xid), the OST will not use the BRW timestamps.  Sadly, there is no obvious
2614          * way to do this in a single call.  bug 10150 */
2615         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2616         crattr->cra_oa = &body->oa;
2617         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2618         cl_req_attr_set(env, osc2cl(obj), crattr);
2619         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2620
2621         aa = ptlrpc_req_async_args(aa, req);
2622         INIT_LIST_HEAD(&aa->aa_oaps);
2623         list_splice_init(&rpc_list, &aa->aa_oaps);
2624         INIT_LIST_HEAD(&aa->aa_exts);
2625         list_splice_init(ext_list, &aa->aa_exts);
2626
2627         spin_lock(&cli->cl_loi_list_lock);
2628         starting_offset >>= PAGE_SHIFT;
2629         if (cmd == OBD_BRW_READ) {
2630                 cli->cl_r_in_flight++;
2631                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2632                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2633                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2634                                       starting_offset + 1);
2635         } else {
2636                 cli->cl_w_in_flight++;
2637                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2638                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2639                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2640                                       starting_offset + 1);
2641         }
2642         spin_unlock(&cli->cl_loi_list_lock);
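        /* Note: lprocfs_oh_tally_log2() buckets its value on a log2 scale
         * (this is what the "pages per rpc" and offset histograms in the
         * osc rpc_stats file report); the "+ 1" ensures a zero starting
         * offset still lands in the first bucket. */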
2643
2644         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2645                   page_count, aa, cli->cl_r_in_flight,
2646                   cli->cl_w_in_flight);
2647         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2648
2649         ptlrpcd_add_req(req);
2650         rc = 0;
2651         EXIT;
2652
2653 out:
2654         if (mem_tight)
2655                 memalloc_noreclaim_restore(mpflag);
2656
2657         if (rc != 0) {
2658                 LASSERT(req == NULL);
2659
2660                 if (oa)
2661                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2662                 if (pga) {
2663                         osc_release_bounce_pages(pga, page_count);
2664                         osc_release_ppga(pga, page_count);
2665                 }
2666                 /* This should rarely happen and is pretty bad: it makes the
2667                  * pending list not follow the dirty order.
2668                  */
2669                 while ((ext = list_first_entry_or_null(ext_list,
2670                                                        struct osc_extent,
2671                                                        oe_link)) != NULL) {
2672                         list_del_init(&ext->oe_link);
2673                         osc_extent_finish(env, ext, 0, rc);
2674                 }
2675         }
2676         RETURN(rc);
2677 }
2678
2679 /* This is to refresh our lock in face of no RPCs. */
2680 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2681 {
2682         struct ptlrpc_request *req;
2683         struct obdo oa;
2684         struct brw_page bpg = { .off = start, .count = 1};
2685         struct brw_page *pga = &bpg;
2686         int rc;
2687
2688         memset(&oa, 0, sizeof(oa));
2689         oa.o_oi = osc->oo_oinfo->loi_oi;
2690         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2691         /* For updated servers - don't do a read */
2692         oa.o_flags = OBD_FL_NORPC;
2693
2694         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2695                                   &req, 0);
2696
2697         /* If we succeeded, ship it off; if not, there's no point in doing
2698          * anything.  Also no resends, no interpret callback and no commit
2699          * callback.
2700          */
2701         if (!rc) {
2702                 req->rq_no_resend = 1;
2703                 ptlrpcd_add_req(req);
2704         }
2705 }
2706
2707 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2708 {
2709         int set = 0;
2710
2711         LASSERT(lock != NULL);
2712
2713         lock_res_and_lock(lock);
2714
2715         if (lock->l_ast_data == NULL)
2716                 lock->l_ast_data = data;
2717         if (lock->l_ast_data == data)
2718                 set = 1;
2719
2720         unlock_res_and_lock(lock);
2721
2722         return set;
2723 }
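/*
 * A minimal usage sketch (hypothetical caller; see osc_match_base() below
 * for a real one): the return value says whether the lock is now bound to
 * our object, either because we just set l_ast_data or because someone
 * already set it to the same object:
 *
 *      struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *
 *      if (osc_set_lock_data(lock, obj)) {
 *              // lock->l_ast_data == obj, safe to use for this object
 *      } else {
 *              // lock already belongs to a different object
 *      }
 */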
2724
2725 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2726                      void *cookie, struct lustre_handle *lockh,
2727                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2728                      int errcode)
2729 {
2730         bool intent = *flags & LDLM_FL_HAS_INTENT;
2731         int rc;
2732         ENTRY;
2733
2734         /* The request was created before ldlm_cli_enqueue call. */
2735         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2736                 struct ldlm_reply *rep;
2737
2738                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2739                 LASSERT(rep != NULL);
2740
2741                 rep->lock_policy_res1 =
2742                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2743                 if (rep->lock_policy_res1)
2744                         errcode = rep->lock_policy_res1;
2745                 if (!speculative)
2746                         *flags |= LDLM_FL_LVB_READY;
2747         } else if (errcode == ELDLM_OK) {
2748                 *flags |= LDLM_FL_LVB_READY;
2749         }
2750
2751         /* Call the update callback. */
2752         rc = (*upcall)(cookie, lockh, errcode);
2753
2754         /* release the reference taken in ldlm_cli_enqueue() */
2755         if (errcode == ELDLM_LOCK_MATCHED)
2756                 errcode = ELDLM_OK;
2757         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2758                 ldlm_lock_decref(lockh, mode);
2759
2760         RETURN(rc);
2761 }
2762
2763 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2764                           void *args, int rc)
2765 {
2766         struct osc_enqueue_args *aa = args;
2767         struct ldlm_lock *lock;
2768         struct lustre_handle *lockh = &aa->oa_lockh;
2769         enum ldlm_mode mode = aa->oa_mode;
2770         struct ost_lvb *lvb = aa->oa_lvb;
2771         __u32 lvb_len = sizeof(*lvb);
2772         __u64 flags = 0;
2773         struct ldlm_enqueue_info einfo = {
2774                 .ei_type = aa->oa_type,
2775                 .ei_mode = mode,
2776         };
2777
2778         ENTRY;
2779
2780         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2781          * be valid. */
2782         lock = ldlm_handle2lock(lockh);
2783         LASSERTF(lock != NULL,
2784                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2785                  lockh->cookie, req, aa);
2786
2787         /* Take an additional reference so that a blocking AST that
2788          * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2789          * to arrive after an upcall has been executed by
2790          * osc_enqueue_fini(). */
2791         ldlm_lock_addref(lockh, mode);
2792
2793         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2794         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2795
2796         /* Let the CP AST grant the lock first. */
2797         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2798
2799         if (aa->oa_speculative) {
2800                 LASSERT(aa->oa_lvb == NULL);
2801                 LASSERT(aa->oa_flags == NULL);
2802                 aa->oa_flags = &flags;
2803         }
2804
2805         /* Complete the lock-acquisition procedure. */
2806         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2807                                    lvb, lvb_len, lockh, rc, false);
2808         /* Complete osc stuff. */
2809         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2810                               aa->oa_flags, aa->oa_speculative, rc);
2811
2812         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2813
2814         ldlm_lock_decref(lockh, mode);
2815         LDLM_LOCK_PUT(lock);
2816         RETURN(rc);
2817 }
2818
2819 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2820  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2821  * other synchronous requests, but holding some locks while trying to obtain
2822  * others may take a considerable amount of time in the case of OST failure;
2823  * and when a client fails to release locks that other sync requests wait on,
2824  * that client is evicted from the cluster -- such scenarios make life
2825  * difficult, so release locks just after they are obtained. */
2826 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2827                      __u64 *flags, union ldlm_policy_data *policy,
2828                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2829                      void *cookie, struct ldlm_enqueue_info *einfo,
2830                      struct ptlrpc_request_set *rqset, int async,
2831                      bool speculative)
2832 {
2833         struct obd_device *obd = exp->exp_obd;
2834         struct lustre_handle lockh = { 0 };
2835         struct ptlrpc_request *req = NULL;
2836         int intent = *flags & LDLM_FL_HAS_INTENT;
2837         __u64 match_flags = *flags;
2838         enum ldlm_mode mode;
2839         int rc;
2840         ENTRY;
2841
2842         /* Filesystem lock extents are extended to page boundaries so that
2843          * dealing with the page cache is a little smoother.  */
2844         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2845         policy->l_extent.end |= ~PAGE_MASK;
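        /* For example, assuming 4 KiB pages, a byte range [6000, 10000]
         * is widened to the page-aligned range [4096, 12287]. */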
2846
2847         /* Next, search for already existing extent locks that will cover us */
2848         /* If we're trying to read, we also search for an existing PW lock.  The
2849          * VFS and page cache already protect us locally, so lots of readers/
2850          * writers can share a single PW lock.
2851          *
2852          * There are problems with conversion deadlocks, so instead of
2853          * converting a read lock to a write lock, we'll just enqueue a new
2854          * one.
2855          *
2856          * At some point we should cancel the read lock instead of making them
2857          * send us a blocking callback, but there are problems with canceling
2858          * locks out from other users right now, too. */
2859         mode = einfo->ei_mode;
2860         if (einfo->ei_mode == LCK_PR)
2861                 mode |= LCK_PW;
2862         /* Normal lock requests must wait for the LVB to be ready before
2863          * matching a lock; speculative lock requests do not need to,
2864          * because they will not actually use the lock. */
2865         if (!speculative)
2866                 match_flags |= LDLM_FL_LVB_READY;
2867         if (intent != 0)
2868                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2869         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2870                                einfo->ei_type, policy, mode, &lockh);
2871         if (mode) {
2872                 struct ldlm_lock *matched;
2873
2874                 if (*flags & LDLM_FL_TEST_LOCK)
2875                         RETURN(ELDLM_OK);
2876
2877                 matched = ldlm_handle2lock(&lockh);
2878                 if (speculative) {
2879                         /* This DLM lock request is speculative, and does not
2880                          * have an associated IO request. Therefore if there
2881                          * is already a DLM lock, it will just inform the
2882                          * caller to cancel the request for this stripe. */
2883                         lock_res_and_lock(matched);
2884                         if (ldlm_extent_equal(&policy->l_extent,
2885                             &matched->l_policy_data.l_extent))
2886                                 rc = -EEXIST;
2887                         else
2888                                 rc = -ECANCELED;
2889                         unlock_res_and_lock(matched);
2890
2891                         ldlm_lock_decref(&lockh, mode);
2892                         LDLM_LOCK_PUT(matched);
2893                         RETURN(rc);
2894                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2895                         *flags |= LDLM_FL_LVB_READY;
2896
2897                         /* We already have a lock, and it's referenced. */
2898                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2899
2900                         ldlm_lock_decref(&lockh, mode);
2901                         LDLM_LOCK_PUT(matched);
2902                         RETURN(ELDLM_OK);
2903                 } else {
2904                         ldlm_lock_decref(&lockh, mode);
2905                         LDLM_LOCK_PUT(matched);
2906                 }
2907         }
2908
2909         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2910                 RETURN(-ENOLCK);
2911
2912         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2913         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2914
2915         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2916                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2917         if (async) {
2918                 if (!rc) {
2919                         struct osc_enqueue_args *aa;
2920                         aa = ptlrpc_req_async_args(aa, req);
2921                         aa->oa_exp         = exp;
2922                         aa->oa_mode        = einfo->ei_mode;
2923                         aa->oa_type        = einfo->ei_type;
2924                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2925                         aa->oa_upcall      = upcall;
2926                         aa->oa_cookie      = cookie;
2927                         aa->oa_speculative = speculative;
2928                         if (!speculative) {
2929                                 aa->oa_flags  = flags;
2930                                 aa->oa_lvb    = lvb;
2931                         } else {
2932                                 /* Speculative locks essentially enqueue a
2933                                  * DLM lock in advance, so we don't care
2934                                  * about the result of the enqueue. */
2935                                 aa->oa_lvb    = NULL;
2936                                 aa->oa_flags  = NULL;
2937                         }
2938
2939                         req->rq_interpret_reply = osc_enqueue_interpret;
2940                         ptlrpc_set_add_req(rqset, req);
2941                 }
2942                 RETURN(rc);
2943         }
2944
2945         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2946                               flags, speculative, rc);
2947
2948         RETURN(rc);
2949 }
2950
2951 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2952                    struct ldlm_res_id *res_id, enum ldlm_type type,
2953                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2954                    __u64 *flags, struct osc_object *obj,
2955                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2956 {
2957         struct obd_device *obd = exp->exp_obd;
2958         __u64 lflags = *flags;
2959         enum ldlm_mode rc;
2960         ENTRY;
2961
2962         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2963                 RETURN(-EIO);
2964
2965         /* Filesystem lock extents are extended to page boundaries so that
2966          * dealing with the page cache is a little smoother */
2967         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2968         policy->l_extent.end |= ~PAGE_MASK;
2969
2970         /* Next, search for already existing extent locks that will cover us */
2971         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2972                                         res_id, type, policy, mode, lockh,
2973                                         match_flags);
2974         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2975                 RETURN(rc);
2976
2977         if (obj != NULL) {
2978                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2979
2980                 LASSERT(lock != NULL);
2981                 if (osc_set_lock_data(lock, obj)) {
2982                         lock_res_and_lock(lock);
2983                         if (!ldlm_is_lvb_cached(lock)) {
2984                                 LASSERT(lock->l_ast_data == obj);
2985                                 osc_lock_lvb_update(env, obj, lock, NULL);
2986                                 ldlm_set_lvb_cached(lock);
2987                         }
2988                         unlock_res_and_lock(lock);
2989                 } else {
2990                         ldlm_lock_decref(lockh, rc);
2991                         rc = 0;
2992                 }
2993                 LDLM_LOCK_PUT(lock);
2994         }
2995         RETURN(rc);
2996 }
2997
2998 static int osc_statfs_interpret(const struct lu_env *env,
2999                                 struct ptlrpc_request *req, void *args, int rc)
3000 {
3001         struct osc_async_args *aa = args;
3002         struct obd_statfs *msfs;
3003
3004         ENTRY;
3005         if (rc == -EBADR)
3006                 /*
3007                  * The request has in fact never been sent due to issues at
3008                  * a higher level (LOV).  Exit immediately since the caller
3009                  * is aware of the problem and takes care of the clean up.
3010                  */
3011                 RETURN(rc);
3012
3013         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3014             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3015                 GOTO(out, rc = 0);
3016
3017         if (rc != 0)
3018                 GOTO(out, rc);
3019
3020         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3021         if (msfs == NULL)
3022                 GOTO(out, rc = -EPROTO);
3023
3024         *aa->aa_oi->oi_osfs = *msfs;
3025 out:
3026         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3027
3028         RETURN(rc);
3029 }
3030
3031 static int osc_statfs_async(struct obd_export *exp,
3032                             struct obd_info *oinfo, time64_t max_age,
3033                             struct ptlrpc_request_set *rqset)
3034 {
3035         struct obd_device     *obd = class_exp2obd(exp);
3036         struct ptlrpc_request *req;
3037         struct osc_async_args *aa;
3038         int rc;
3039         ENTRY;
3040
3041         if (obd->obd_osfs_age >= max_age) {
3042                 CDEBUG(D_SUPER,
3043                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3044                        obd->obd_name, &obd->obd_osfs,
3045                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3046                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3047                 spin_lock(&obd->obd_osfs_lock);
3048                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3049                 spin_unlock(&obd->obd_osfs_lock);
3050                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3051                 if (oinfo->oi_cb_up)
3052                         oinfo->oi_cb_up(oinfo, 0);
3053
3054                 RETURN(0);
3055         }
3056
3057         /* We could possibly pass max_age in the request (as an absolute
3058          * timestamp or a "seconds.usec ago") so the target can avoid doing
3059          * extra calls into the filesystem if that isn't necessary (e.g.
3060          * during mount that would help a bit).  Having relative timestamps
3061          * is not so great if request processing is slow, while absolute
3062          * timestamps are not ideal because they need time synchronization. */
3063         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3064         if (req == NULL)
3065                 RETURN(-ENOMEM);
3066
3067         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3068         if (rc) {
3069                 ptlrpc_request_free(req);
3070                 RETURN(rc);
3071         }
3072         ptlrpc_request_set_replen(req);
3073         req->rq_request_portal = OST_CREATE_PORTAL;
3074         ptlrpc_at_set_req_timeout(req);
3075
3076         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3077                 /* procfs requests should not block waiting for the OST, to avoid deadlock */
3078                 req->rq_no_resend = 1;
3079                 req->rq_no_delay = 1;
3080         }
3081
3082         req->rq_interpret_reply = osc_statfs_interpret;
3083         aa = ptlrpc_req_async_args(aa, req);
3084         aa->aa_oi = oinfo;
3085
3086         ptlrpc_set_add_req(rqset, req);
3087         RETURN(0);
3088 }
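/*
 * A rough sketch of how this is driven (hypothetical caller and callback
 * names; the real entry point is the obd_statfs_async() wrapper): the
 * obd_info carries the result buffer and the completion callback, and the
 * reply is consumed when the caller's request set is processed:
 *
 *      struct obd_statfs osfs;
 *      struct obd_info oinfo = {
 *              .oi_osfs  = &osfs,
 *              .oi_cb_up = my_statfs_done,
 *              .oi_flags = OBD_STATFS_NODELAY,
 *      };
 *
 *      rc = obd_statfs_async(exp, &oinfo, max_age, rqset);
 *      // my_statfs_done(&oinfo, rc) runs from set processing
 */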
3089
3090 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3091                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3092 {
3093         struct obd_device     *obd = class_exp2obd(exp);
3094         struct obd_statfs     *msfs;
3095         struct ptlrpc_request *req;
3096         struct obd_import     *imp, *imp0;
3097         int rc;
3098         ENTRY;
3099
3100         /* Since the request might also come from lprocfs, we need to
3101          * sync this with client_disconnect_export() (Bug 15684).
3102          */
3103         with_imp_locked(obd, imp0, rc)
3104                 imp = class_import_get(imp0);
3105         if (rc)
3106                 RETURN(rc);
3107
3108         /* We could possibly pass max_age in the request (as an absolute
3109          * timestamp or a "seconds.usec ago") so the target can avoid doing
3110          * extra calls into the filesystem if that isn't necessary (e.g.
3111          * during mount that would help a bit).  Having relative timestamps
3112          * is not so great if request processing is slow, while absolute
3113          * timestamps are not ideal because they need time synchronization. */
3114         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3115
3116         class_import_put(imp);
3117
3118         if (req == NULL)
3119                 RETURN(-ENOMEM);
3120
3121         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3122         if (rc) {
3123                 ptlrpc_request_free(req);
3124                 RETURN(rc);
3125         }
3126         ptlrpc_request_set_replen(req);
3127         req->rq_request_portal = OST_CREATE_PORTAL;
3128         ptlrpc_at_set_req_timeout(req);
3129
3130         if (flags & OBD_STATFS_NODELAY) {
3131                 /* procfs requests should not block waiting for the OST, to avoid deadlock */
3132                 req->rq_no_resend = 1;
3133                 req->rq_no_delay = 1;
3134         }
3135
3136         rc = ptlrpc_queue_wait(req);
3137         if (rc)
3138                 GOTO(out, rc);
3139
3140         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3141         if (msfs == NULL)
3142                 GOTO(out, rc = -EPROTO);
3143
3144         *osfs = *msfs;
3145
3146         EXIT;
3147 out:
3148         ptlrpc_req_finished(req);
3149         return rc;
3150 }
3151
3152 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3153                          void *karg, void __user *uarg)
3154 {
3155         struct obd_device *obd = exp->exp_obd;
3156         struct obd_ioctl_data *data = karg;
3157         int rc = 0;
3158
3159         ENTRY;
3160         if (!try_module_get(THIS_MODULE)) {
3161                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3162                        module_name(THIS_MODULE));
3163                 return -EINVAL;
3164         }
3165         switch (cmd) {
3166         case OBD_IOC_CLIENT_RECOVER:
3167                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3168                                            data->ioc_inlbuf1, 0);
3169                 if (rc > 0)
3170                         rc = 0;
3171                 break;
3172         case IOC_OSC_SET_ACTIVE:
3173                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3174                                               data->ioc_offset);
3175                 break;
3176         default:
3177                 rc = -ENOTTY;
3178                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3179                        obd->obd_name, cmd, current->comm, rc);
3180                 break;
3181         }
3182
3183         module_put(THIS_MODULE);
3184         return rc;
3185 }
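/*
 * These ioctls are typically reached through lctl; e.g. "lctl --device
 * <osc> recover" should end up in the OBD_IOC_CLIENT_RECOVER case above
 * (an illustration, not an exhaustive list of callers).
 */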
3186
3187 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3188                        u32 keylen, void *key, u32 vallen, void *val,
3189                        struct ptlrpc_request_set *set)
3190 {
3191         struct ptlrpc_request *req;
3192         struct obd_device     *obd = exp->exp_obd;
3193         struct obd_import     *imp = class_exp2cliimp(exp);
3194         char                  *tmp;
3195         int                    rc;
3196         ENTRY;
3197
3198         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3199
3200         if (KEY_IS(KEY_CHECKSUM)) {
3201                 if (vallen != sizeof(int))
3202                         RETURN(-EINVAL);
3203                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3204                 RETURN(0);
3205         }
3206
3207         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3208                 sptlrpc_conf_client_adapt(obd);
3209                 RETURN(0);
3210         }
3211
3212         if (KEY_IS(KEY_FLUSH_CTX)) {
3213                 sptlrpc_import_flush_my_ctx(imp);
3214                 RETURN(0);
3215         }
3216
3217         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3218                 struct client_obd *cli = &obd->u.cli;
3219                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3220                 long target = *(long *)val;
3221
3222                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3223                 *(long *)val -= nr;
3224                 RETURN(0);
3225         }
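        /* Worked example for the LRU shrink key above: with 1000 pages on
         * cl_lru_in_list, nr starts at 500; a caller target of 300 shrinks
         * min(500, 300) = 300 pages, and *val is decremented by however
         * many pages were actually freed. */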
3226
3227         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3228                 RETURN(-EINVAL);
3229
3230         /* We pass all other commands directly to the OST.  Since nobody calls
3231          * OSC methods directly and everybody is supposed to go through LOV, we
3232          * assume LOV checked invalid values for us.
3233          * The only recognised values so far are evict_by_nid and mds_conn.
3234          * Even if something bad goes through, we'd get a -EINVAL from the OST
3235          * anyway. */
3236
3237         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3238                                                 &RQF_OST_SET_GRANT_INFO :
3239                                                 &RQF_OBD_SET_INFO);
3240         if (req == NULL)
3241                 RETURN(-ENOMEM);
3242
3243         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3244                              RCL_CLIENT, keylen);
3245         if (!KEY_IS(KEY_GRANT_SHRINK))
3246                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3247                                      RCL_CLIENT, vallen);
3248         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3249         if (rc) {
3250                 ptlrpc_request_free(req);
3251                 RETURN(rc);
3252         }
3253
3254         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3255         memcpy(tmp, key, keylen);
3256         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3257                                                         &RMF_OST_BODY :
3258                                                         &RMF_SETINFO_VAL);
3259         memcpy(tmp, val, vallen);
3260
3261         if (KEY_IS(KEY_GRANT_SHRINK)) {
3262                 struct osc_grant_args *aa;
3263                 struct obdo *oa;
3264
3265                 aa = ptlrpc_req_async_args(aa, req);
3266                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3267                 if (!oa) {
3268                         ptlrpc_req_finished(req);
3269                         RETURN(-ENOMEM);
3270                 }
3271                 *oa = ((struct ost_body *)val)->oa;
3272                 aa->aa_oa = oa;
3273                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3274         }
3275
3276         ptlrpc_request_set_replen(req);
3277         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3278                 LASSERT(set != NULL);
3279                 ptlrpc_set_add_req(set, req);
3280                 ptlrpc_check_set(NULL, set);
3281         } else {
3282                 ptlrpcd_add_req(req);
3283         }
3284
3285         RETURN(0);
3286 }
3287 EXPORT_SYMBOL(osc_set_info_async);
3288
3289 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3290                   struct obd_device *obd, struct obd_uuid *cluuid,
3291                   struct obd_connect_data *data, void *localdata)
3292 {
3293         struct client_obd *cli = &obd->u.cli;
3294
3295         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3296                 long lost_grant;
3297                 long grant;
3298
3299                 spin_lock(&cli->cl_loi_list_lock);
3300                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3301                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3302                         /* restore ocd_grant_blkbits as client page bits */
3303                         data->ocd_grant_blkbits = PAGE_SHIFT;
3304                         grant += cli->cl_dirty_grant;
3305                 } else {
3306                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3307                 }
3308                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3309                 lost_grant = cli->cl_lost_grant;
3310                 cli->cl_lost_grant = 0;
3311                 spin_unlock(&cli->cl_loi_list_lock);
3312
3313                 CDEBUG(D_RPCTRACE,
3314                        "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
3315                        data->ocd_connect_flags, data->ocd_version, data->ocd_grant, lost_grant);
3316         }
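        /* Illustration (made-up numbers): with cl_avail_grant = 1 MiB,
         * cl_reserved_grant = 0 and 256 dirty 4 KiB pages, the client
         * reconnects asking for ocd_grant = 1 MiB + 1 MiB = 2 MiB; with no
         * grant at all it falls back to twice the BRW size. */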
3317
3318         RETURN(0);
3319 }
3320 EXPORT_SYMBOL(osc_reconnect);
3321
3322 int osc_disconnect(struct obd_export *exp)
3323 {
3324         struct obd_device *obd = class_exp2obd(exp);
3325         int rc;
3326
3327         rc = client_disconnect_export(exp);
3328         /**
3329          * Initially we put del_shrink_grant before disconnect_export, but it
3330          * causes the following problem if setup (connect) and cleanup
3331          * (disconnect) are tangled together.
3332          *      connect p1                     disconnect p2
3333          *   ptlrpc_connect_import
3334          *     ...............               class_manual_cleanup
3335          *                                     osc_disconnect
3336          *                                     del_shrink_grant
3337          *   ptlrpc_connect_interrupt
3338          *     osc_init_grant
3339          *   add this client to shrink list
3340          *                                      cleanup_osc
3341          * Bang! grant shrink thread trigger the shrink. BUG18662
3342          */
3343         osc_del_grant_list(&obd->u.cli);
3344         return rc;
3345 }
3346 EXPORT_SYMBOL(osc_disconnect);
3347
3348 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3349                                  struct hlist_node *hnode, void *arg)
3350 {
3351         struct lu_env *env = arg;
3352         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3353         struct ldlm_lock *lock;
3354         struct osc_object *osc = NULL;
3355         ENTRY;
3356
3357         lock_res(res);
3358         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3359                 if (lock->l_ast_data != NULL && osc == NULL) {
3360                         osc = lock->l_ast_data;
3361                         cl_object_get(osc2cl(osc));
3362                 }
3363
3364                 /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
3365                  * by the 2nd round of ldlm_namespace_clean() call in
3366                  * osc_import_event(). */
3367                 ldlm_clear_cleaned(lock);
3368         }
3369         unlock_res(res);
3370
3371         if (osc != NULL) {
3372                 osc_object_invalidate(env, osc);
3373                 cl_object_put(env, osc2cl(osc));
3374         }
3375
3376         RETURN(0);
3377 }
3378 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3379
3380 static int osc_import_event(struct obd_device *obd,
3381                             struct obd_import *imp,
3382                             enum obd_import_event event)
3383 {
3384         struct client_obd *cli;
3385         int rc = 0;
3386
3387         ENTRY;
3388         LASSERT(imp->imp_obd == obd);
3389
3390         switch (event) {
3391         case IMP_EVENT_DISCON: {
3392                 cli = &obd->u.cli;
3393                 spin_lock(&cli->cl_loi_list_lock);
3394                 cli->cl_avail_grant = 0;
3395                 cli->cl_lost_grant = 0;
3396                 spin_unlock(&cli->cl_loi_list_lock);
3397                 break;
3398         }
3399         case IMP_EVENT_INACTIVE: {
3400                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3401                 break;
3402         }
3403         case IMP_EVENT_INVALIDATE: {
3404                 struct ldlm_namespace *ns = obd->obd_namespace;
3405                 struct lu_env         *env;
3406                 __u16                  refcheck;
3407
3408                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3409
3410                 env = cl_env_get(&refcheck);
3411                 if (!IS_ERR(env)) {
3412                         osc_io_unplug(env, &obd->u.cli, NULL);
3413
3414                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3415                                                  osc_ldlm_resource_invalidate,
3416                                                  env, 0);
3417                         cl_env_put(env, &refcheck);
3418
3419                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3420                 } else
3421                         rc = PTR_ERR(env);
3422                 break;
3423         }
3424         case IMP_EVENT_ACTIVE: {
3425                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3426                 break;
3427         }
3428         case IMP_EVENT_OCD: {
3429                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3430
3431                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3432                         osc_init_grant(&obd->u.cli, ocd);
3433
3434                 /* See bug 7198 */
3435                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3436                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3437
3438                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3439                 break;
3440         }
3441         case IMP_EVENT_DEACTIVATE: {
3442                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3443                 break;
3444         }
3445         case IMP_EVENT_ACTIVATE: {
3446                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3447                 break;
3448         }
3449         default:
3450                 CERROR("Unknown import event %d\n", event);
3451                 LBUG();
3452         }
3453         RETURN(rc);
3454 }
3455
3456 /**
3457  * Determine whether the lock can be canceled before replaying the lock
3458  * during recovery, see bug16774 for detailed information.
3459  *
3460  * \retval zero the lock can't be canceled
3461  * \retval other ok to cancel
3462  */
3463 static int osc_cancel_weight(struct ldlm_lock *lock)
3464 {
3465         /*
3466          * Cancel all unused and granted extent lock.
3467          * Cancel all unused and granted extent locks.
3468         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3469             ldlm_is_granted(lock) &&
3470             osc_ldlm_weigh_ast(lock) == 0)
3471                 RETURN(1);
3472
3473         RETURN(0);
3474 }
3475
3476 static int brw_queue_work(const struct lu_env *env, void *data)
3477 {
3478         struct client_obd *cli = data;
3479
3480         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3481
3482         osc_io_unplug(env, cli, NULL);
3483         RETURN(0);
3484 }
3485
3486 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3487 {
3488         struct client_obd *cli = &obd->u.cli;
3489         void *handler;
3490         int rc;
3491
3492         ENTRY;
3493
3494         rc = ptlrpcd_addref();
3495         if (rc)
3496                 RETURN(rc);
3497
3498         rc = client_obd_setup(obd, lcfg);
3499         if (rc)
3500                 GOTO(out_ptlrpcd, rc);
3501
3503         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3504         if (IS_ERR(handler))
3505                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3506         cli->cl_writeback_work = handler;
3507
3508         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3509         if (IS_ERR(handler))
3510                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3511         cli->cl_lru_work = handler;
3512
3513         rc = osc_quota_setup(obd);
3514         if (rc)
3515                 GOTO(out_ptlrpcd_work, rc);
3516
3517         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3518         cli->cl_root_squash = 0;
3519         osc_update_next_shrink(cli);
3520
3521         RETURN(rc);
3522
3523 out_ptlrpcd_work:
3524         if (cli->cl_writeback_work != NULL) {
3525                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3526                 cli->cl_writeback_work = NULL;
3527         }
3528         if (cli->cl_lru_work != NULL) {
3529                 ptlrpcd_destroy_work(cli->cl_lru_work);
3530                 cli->cl_lru_work = NULL;
3531         }
3532         client_obd_cleanup(obd);
3533 out_ptlrpcd:
3534         ptlrpcd_decref();
3535         RETURN(rc);
3536 }
3537 EXPORT_SYMBOL(osc_setup_common);
3538
3539 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3540 {
3541         struct client_obd *cli = &obd->u.cli;
3542         int                adding;
3543         int                added;
3544         int                req_count;
3545         int                rc;
3546
3547         ENTRY;
3548
3549         rc = osc_setup_common(obd, lcfg);
3550         if (rc < 0)
3551                 RETURN(rc);
3552
3553         rc = osc_tunables_init(obd);
3554         if (rc)
3555                 RETURN(rc);
3556
3557         /*
3558          * We try to control the total number of requests with an upper limit
3559          * osc_reqpool_maxreqcount. There might be some race which will cause
3560          * over-limit allocation, but it is fine.
3561          */
3562         req_count = atomic_read(&osc_pool_req_count);
3563         if (req_count < osc_reqpool_maxreqcount) {
3564                 adding = cli->cl_max_rpcs_in_flight + 2;
3565                 if (req_count + adding > osc_reqpool_maxreqcount)
3566                         adding = osc_reqpool_maxreqcount - req_count;
3567
3568                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3569                 atomic_add(added, &osc_pool_req_count);
3570         }
3571
3572         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3573
3574         spin_lock(&osc_shrink_lock);
3575         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3576         spin_unlock(&osc_shrink_lock);
3577         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3578         cli->cl_import->imp_idle_debug = D_HA;
3579
3580         RETURN(0);
3581 }
3582
3583 int osc_precleanup_common(struct obd_device *obd)
3584 {
3585         struct client_obd *cli = &obd->u.cli;
3586         ENTRY;
3587
3588         /* LU-464
3589          * for echo client, export may be on zombie list, wait for
3590          * zombie thread to cull it, because cli.cl_import will be
3591          * cleared in client_disconnect_export():
3592          *   class_export_destroy() -> obd_cleanup() ->
3593          *   echo_device_free() -> echo_client_cleanup() ->
3594          *   obd_disconnect() -> osc_disconnect() ->
3595          *   client_disconnect_export()
3596          */
3597         obd_zombie_barrier();
3598         if (cli->cl_writeback_work) {
3599                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3600                 cli->cl_writeback_work = NULL;
3601         }
3602
3603         if (cli->cl_lru_work) {
3604                 ptlrpcd_destroy_work(cli->cl_lru_work);
3605                 cli->cl_lru_work = NULL;
3606         }
3607
3608         obd_cleanup_client_import(obd);
3609         RETURN(0);
3610 }
3611 EXPORT_SYMBOL(osc_precleanup_common);
3612
3613 static int osc_precleanup(struct obd_device *obd)
3614 {
3615         ENTRY;
3616
3617         osc_precleanup_common(obd);
3618
3619         ptlrpc_lprocfs_unregister_obd(obd);
3620         RETURN(0);
3621 }
3622
3623 int osc_cleanup_common(struct obd_device *obd)
3624 {
3625         struct client_obd *cli = &obd->u.cli;
3626         int rc;
3627
3628         ENTRY;
3629
3630         spin_lock(&osc_shrink_lock);
3631         list_del(&cli->cl_shrink_list);
3632         spin_unlock(&osc_shrink_lock);
3633
3634         /* lru cleanup */
3635         if (cli->cl_cache != NULL) {
3636                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3637                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3638                 list_del_init(&cli->cl_lru_osc);
3639                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3640                 cli->cl_lru_left = NULL;
3641                 cl_cache_decref(cli->cl_cache);
3642                 cli->cl_cache = NULL;
3643         }
3644
3645         /* free memory of osc quota cache */
3646         osc_quota_cleanup(obd);
3647
3648         rc = client_obd_cleanup(obd);
3649
3650         ptlrpcd_decref();
3651         RETURN(rc);
3652 }
3653 EXPORT_SYMBOL(osc_cleanup_common);
3654
3655 static const struct obd_ops osc_obd_ops = {
3656         .o_owner                = THIS_MODULE,
3657         .o_setup                = osc_setup,
3658         .o_precleanup           = osc_precleanup,
3659         .o_cleanup              = osc_cleanup_common,
3660         .o_add_conn             = client_import_add_conn,
3661         .o_del_conn             = client_import_del_conn,
3662         .o_connect              = client_connect_import,
3663         .o_reconnect            = osc_reconnect,
3664         .o_disconnect           = osc_disconnect,
3665         .o_statfs               = osc_statfs,
3666         .o_statfs_async         = osc_statfs_async,
3667         .o_create               = osc_create,
3668         .o_destroy              = osc_destroy,
3669         .o_getattr              = osc_getattr,
3670         .o_setattr              = osc_setattr,
3671         .o_iocontrol            = osc_iocontrol,
3672         .o_set_info_async       = osc_set_info_async,
3673         .o_import_event         = osc_import_event,
3674         .o_quotactl             = osc_quotactl,
3675 };
3676
3677 LIST_HEAD(osc_shrink_list);
3678 DEFINE_SPINLOCK(osc_shrink_lock);
3679
3680 #ifdef HAVE_SHRINKER_COUNT
3681 static struct shrinker osc_cache_shrinker = {
3682         .count_objects  = osc_cache_shrink_count,
3683         .scan_objects   = osc_cache_shrink_scan,
3684         .seeks          = DEFAULT_SEEKS,
3685 };
3686 #else
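/*
 * Older kernels expose a single ->shrink() hook instead of separate
 * count/scan callbacks; emulate it by scanning first and then returning
 * the remaining object count.
 */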
3687 static int osc_cache_shrink(struct shrinker *shrinker,
3688                             struct shrink_control *sc)
3689 {
3690         (void)osc_cache_shrink_scan(shrinker, sc);
3691
3692         return osc_cache_shrink_count(shrinker, sc);
3693 }
3694
3695 static struct shrinker osc_cache_shrinker = {
3696         .shrink   = osc_cache_shrink,
3697         .seeks    = DEFAULT_SEEKS,
3698 };
3699 #endif
3700
3701 static int __init osc_init(void)
3702 {
3703         unsigned int reqpool_size;
3704         unsigned int reqsize;
3705         int rc;
3706         ENTRY;
3707
3708         /* print an address of _any_ initialized kernel symbol from this
3709          * module, to allow debugging with gdb that doesn't support data
3710          * symbols from modules. */
3711         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3712
3713         rc = lu_kmem_init(osc_caches);
3714         if (rc)
3715                 RETURN(rc);
3716
3717         rc = class_register_type(&osc_obd_ops, NULL, true,
3718                                  LUSTRE_OSC_NAME, &osc_device_type);
3719         if (rc)
3720                 GOTO(out_kmem, rc);
3721
3722         rc = register_shrinker(&osc_cache_shrinker);
3723         if (rc)
3724                 GOTO(out_type, rc);
3725
3726         /* This is obviously too much memory; only prevent overflow here */
3727         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3728                 GOTO(out_shrinker, rc = -EINVAL);
3729
3730         reqpool_size = osc_reqpool_mem_max << 20;
3731
3732         reqsize = 1;
3733         while (reqsize < OST_IO_MAXREQSIZE)
3734                 reqsize = reqsize << 1;
3735
3736         /*
3737          * We don't enlarge the request count in OSC pool according to
3738          * cl_max_rpcs_in_flight. The allocation from the pool will only be
3739          * tried after normal allocation failed. So a small OSC pool won't
3740          * cause much performance degradation in most cases.
3741          */
3742         osc_reqpool_maxreqcount = reqpool_size / reqsize;
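        /* E.g. with the default osc_reqpool_mem_max of 5 (MiB) and reqsize
         * rounded up to the next power of two >= OST_IO_MAXREQSIZE -- say
         * 1 MiB, purely for illustration -- the pool is capped at 5
         * requests. */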
3743
3744         atomic_set(&osc_pool_req_count, 0);
3745         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3746                                           ptlrpc_add_rqs_to_pool);
3747
3748         if (osc_rq_pool == NULL)
3749                 GOTO(out_shrinker, rc = -ENOMEM);
3750
3751         rc = osc_start_grant_work();
3752         if (rc != 0)
3753                 GOTO(out_req_pool, rc);
3754
3755         RETURN(rc);
3756
3757 out_req_pool:
3758         ptlrpc_free_rq_pool(osc_rq_pool);
3759 out_shrinker:
3760         unregister_shrinker(&osc_cache_shrinker);
3761 out_type:
3762         class_unregister_type(LUSTRE_OSC_NAME);
3763 out_kmem:
3764         lu_kmem_fini(osc_caches);
3765
3766         RETURN(rc);
3767 }
3768
3769 static void __exit osc_exit(void)
3770 {
3771         osc_stop_grant_work();
3772         unregister_shrinker(&osc_cache_shrinker);
3773         class_unregister_type(LUSTRE_OSC_NAME);
3774         lu_kmem_fini(osc_caches);
3775         ptlrpc_free_rq_pool(osc_rq_pool);
3776 }
3777
3778 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3779 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3780 MODULE_VERSION(LUSTRE_VERSION_STRING);
3781 MODULE_LICENSE("GPL");
3782
3783 module_init(osc_init);
3784 module_exit(osc_exit);