/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_ioctl_old.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <lustre_osc.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

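/* Pack @oa into the outgoing request body, converting the in-memory obdo
 * to its wire format according to what this import negotiated at connect
 * time (lustre_set_wire_obdo() masks fields the peer cannot understand).
 */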
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

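/* Synchronously fetch the attributes of the object named by @oa from the
 * OST and unpack them back into @oa. Uses ptlrpc_queue_wait(), so it must
 * only be called from a context that may sleep.
 */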
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

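/* Synchronous SETATTR counterpart of osc_getattr(): push the attributes in
 * @oa to the OST and copy the server's resulting view of the object back
 * into @oa.
 */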
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

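/* Reply interpreter shared by the asynchronous setattr-style RPCs: unpack
 * the reply body into sa_oa on success, then unconditionally invoke the
 * caller's upcall with the final status.
 */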
static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

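/* Send an OST_SETATTR request without blocking. With a NULL @rqset the
 * request is handed to ptlrpcd and the reply is ignored; otherwise it is
 * added to @rqset and @upcall(@cookie, rc) runs from osc_setattr_interpret()
 * when the reply (or an error) is processed.
 *
 * Illustrative caller sketch; the callback name and completion cookie are
 * hypothetical, not part of this file:
 *
 *      static int my_setattr_done(void *cookie, int rc)
 *      {
 *              complete((struct completion *)cookie);
 *              return rc;
 *      }
 *      ...
 *      rc = osc_setattr_async(exp, oa, my_setattr_done, &done, rqset);
 */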
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response; upcall and cookie may
 * also be NULL in that case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

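/* Synchronously create an OST object described by @oa. The LASSERT on
 * fid_seq_is_echo() documents that this path is only exercised by the echo
 * client; normal object creation is driven from the MDS side.
 */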
static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

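/* Send an OST_PUNCH RPC (truncate/hole punch) for @oa without waiting:
 * the request goes to ptlrpcd and @upcall(@cookie, rc) runs from
 * osc_setattr_interpret() when the reply is processed.
 */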
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to the OSS from the client (obdo structure)
 * @upcall:     Completion callback invoked when the reply is interpreted
 * @cookie:     Opaque cookie passed through to @upcall
 * @mode:       Operation to perform on the given range
 *
 * Only block allocation (the standard preallocate operation) is currently
 * supported; other mode flags are not supported yet. ftruncate(2) and
 * truncate(2) are handled via a SETATTR request instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);

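/* Interpreter for OST_SYNC replies: copy the returned attributes into
 * fa_oa and refresh the cached 'blocks' attribute of the matching
 * cl_object before invoking the caller's upcall.
 */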
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

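/* Flush the given byte range of @obj to stable storage on the OST. The
 * range travels in the size/blocks fields of @oa (see the comment in the
 * body); completion is reported through @upcall/@cookie via
 * osc_sync_interpret().
 */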
int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and locally cancel the locks matching @mode on the resource named
 * by @oa. Found locks are added to the @cancels list. Returns the number
 * of locks added. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This is distinct from the case where ELC was never supported, in
         * which case we still want to cancel locks in advance and simply
         * cancel them locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

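/* Try to reserve a slot for one more destroy RPC. The increment and the
 * limit check are not atomic as a pair, so on failure the slot is released
 * again and any waiter that raced with us is woken up. Returns 1 if the
 * destroy may be sent now, 0 if the caller must wait.
 */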
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

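/* Destroy the OST object described by @oa. Matching client-side locks are
 * cancelled first (with LDLM_FL_DISCARD_DATA, using early lock cancel when
 * the server supports it), the number of concurrent destroy RPCs is
 * throttled to cl_max_rpcs_in_flight, and the request itself is sent
 * through ptlrpcd without waiting for the reply.
 */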
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight.
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

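/* Fill in the grant/dirty accounting fields of @oa (o_dirty, o_undirty,
 * o_grant, o_dropped) that piggyback on RPCs so the server can keep its
 * view of this client's cache in sync. Takes cl_loi_list_lock internally.
 */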
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and the atomic_inc() are not covered
                 * by a lock, so they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}
EXPORT_SYMBOL(osc_update_next_shrink);

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

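/* Hand a quarter of the currently available grant back to the server:
 * the amount is advertised in @oa with OBD_FL_SHRINK_GRANT set and is
 * deducted from cl_avail_grant immediately.
 */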
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that would
         * negatively impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

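/* Decide whether this client should return grant now: only when the server
 * supports (and has not disabled) grant shrinking, the shrink deadline is
 * within 5 seconds, the import is FULL, and we hold more grant than one
 * full-sized BRW RPC needs.
 */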
static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

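/* Periodic worker that walks every client on the grant list, sends up to
 * GRANT_SHRINK_RPC_BATCH shrink RPCs per pass, and then re-arms itself for
 * the earliest upcoming per-client shrink deadline.
 */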
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}
EXPORT_SYMBOL(osc_schedule_grant_work);

/**
 * Start the grant work handler, which returns grant to the server for
 * idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state has
         * already left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

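/* Validate the per-niobuf return codes in a BRW_WRITE reply: fail on the
 * first negative rc, treat any other non-zero rc as a protocol error, and
 * verify the bulk transfer moved exactly the number of bytes requested.
 */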
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

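/* Two brw_pages can be merged into one niobuf when they are file-contiguous
 * and their flags agree, modulo bits known to be safe to mix.
 */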
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC |
                                  OBD_BRW_SYS_RESOURCE);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
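/* Compute a T10-PI style checksum over the bulk: @fn generates per-sector
 * DIF guard tags into a scratch page, and the accumulated tags are then
 * hashed with the OBD_CKSUM_T10_TOP algorithm to produce the final value.
 */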
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum, bool resend)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __be16 *guard_start;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        unsigned int bufsize = sizeof(cksum);
        int rc = 0, rc2;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__be16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        CDEBUG(D_PAGE | (resend ? D_HA : 0),
               "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
               guard_number, resend, nob, pg_count);

        while (nob > 0 && pg_count > 0) {
                int off = pga[i]->off & ~PAGE_MASK;
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
                int guards_needed = DIV_ROUND_UP(off + count, sector_size) -
                                        (off / sector_size);

                if (guards_needed > guard_number - used_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The left guard number should be able to hold checksums of a
                 * whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (unlikely(resend))
                        CDEBUG(D_PAGE | D_HA,
                               "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
                               i, used, pga[i]->off & ~PAGE_MASK, count,
                               (int)(used * sizeof(*guard_start)),
                               guard_start + used_number);
                if (rc)
                        break;

                used_number += used;
                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out_hash, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

out_hash:
        rc2 = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
        if (!rc)
                rc = rc2;
        if (rc == 0) {
                /* For sending we only compute the wrong checksum instead
                 * of corrupting the data so it is still correct on a redo */
                if (opc == OST_WRITE &&
                                OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                        cksum++;

                *check_sum = cksum;
        }
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

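/* Plain (non-T10) bulk checksum: feed every page region into a libcfs
 * crypto hash and return the digest. Includes fault-injection hooks that
 * corrupt reads or skew the checksum on writes for testing.
 */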
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

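/* Front-end used by the BRW path: pick the T10-PI implementation when
 * @cksum_type carries DIF parameters, otherwise fall back to the plain
 * bulk checksum above.
 */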
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum, bool resend)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum,
                                             resend);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

#ifdef CONFIG_LL_ENCRYPTION
/**
 * osc_encrypt_pagecache_blocks() - overlay to llcrypt_encrypt_pagecache_blocks
 * @srcpage:   The locked pagecache page containing the block(s) to encrypt
 * @dstpage:   The page to put the encryption result into
 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
 *             multiple of the filesystem's block size.
 * @offs:      Byte offset within @srcpage of the first block to encrypt.
 *             Must be a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags
 *
 * This overlay function is necessary to be able to provide our own bounce page.
 */
static struct page *osc_encrypt_pagecache_blocks(struct page *srcpage,
                                                 struct page *dstpage,
                                                 unsigned int len,
                                                 unsigned int offs,
                                                 gfp_t gfp_flags)
{
        const struct inode *inode = srcpage->mapping->host;
        const unsigned int blockbits = inode->i_blkbits;
        const unsigned int blocksize = 1 << blockbits;
        u64 lblk_num = ((u64)srcpage->index << (PAGE_SHIFT - blockbits)) +
                (offs >> blockbits);
        unsigned int i;
        int err;

        if (unlikely(!dstpage))
                return llcrypt_encrypt_pagecache_blocks(srcpage, len, offs,
                                                        gfp_flags);

        if (WARN_ON_ONCE(!PageLocked(srcpage)))
                return ERR_PTR(-EINVAL);

        if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
                return ERR_PTR(-EINVAL);

        /* Set PagePrivate2 for disambiguation in
         * osc_finalize_bounce_page().
         * It means cipher page was not allocated by llcrypt.
         */
        SetPagePrivate2(dstpage);

        for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
                err = llcrypt_encrypt_block(inode, srcpage, dstpage, blocksize,
                                            i, lblk_num, gfp_flags);
                if (err)
                        return ERR_PTR(err);
        }
        SetPagePrivate(dstpage);
        set_page_private(dstpage, (unsigned long)srcpage);
        return dstpage;
}

/**
 * osc_finalize_bounce_page() - overlay to llcrypt_finalize_bounce_page
 *
 * This overlay function is necessary to handle bounce pages
 * allocated by ourselves.
 */
static inline void osc_finalize_bounce_page(struct page **pagep)
{
        struct page *page = *pagep;

        /* PagePrivate2 was set in osc_encrypt_pagecache_blocks
         * to indicate the cipher page was allocated by ourselves.
         * So we must not free it via llcrypt.
         */
        if (unlikely(!page || !PagePrivate2(page)))
                return llcrypt_finalize_bounce_page(pagep);

        if (llcrypt_is_bounce_page(page)) {
                *pagep = llcrypt_pagecache_page(page);
                ClearPagePrivate2(page);
                set_page_private(page, (unsigned long)NULL);
                ClearPagePrivate(page);
        }
}
#else /* !CONFIG_LL_ENCRYPTION */
#define osc_encrypt_pagecache_blocks(srcpage, dstpage, len, offs, gfp_flags) \
        llcrypt_encrypt_pagecache_blocks(srcpage, len, offs, gfp_flags)
#define osc_finalize_bounce_page(page) llcrypt_finalize_bounce_page(page)
#endif
1462
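/* Undo the bounce-page substitution done for encrypted writes: restore the
 * original pagecache pages in @pga (bounce pages are recognized by the
 * PageChecked flag), return pool-allocated cipher pages to the sptlrpc
 * encryption pool, and revert the count/offset adjustments.
 */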
1463 static inline void osc_release_bounce_pages(struct brw_page **pga,
1464                                             u32 page_count)
1465 {
1466 #ifdef HAVE_LUSTRE_CRYPTO
1467         struct page **pa = NULL;
1468         int i, j = 0;
1469
1470         if (!pga[0])
1471                 return;
1472
1473 #ifdef CONFIG_LL_ENCRYPTION
1474         if (PageChecked(pga[0]->pg)) {
1475                 OBD_ALLOC_PTR_ARRAY_LARGE(pa, page_count);
1476                 if (!pa)
1477                         return;
1478         }
1479 #endif
1480
1481         for (i = 0; i < page_count; i++) {
1482                 /* Bounce pages used by osc_encrypt_pagecache_blocks(),
1483                  * called from osc_brw_prep_request(),
1484                  * are identified by the PageChecked flag.
1485                  */
1486                 if (PageChecked(pga[i]->pg)) {
1487                         if (pa)
1488                                 pa[j++] = pga[i]->pg;
1489                         osc_finalize_bounce_page(&pga[i]->pg);
1490                 }
1491                 pga[i]->count -= pga[i]->bp_count_diff;
1492                 pga[i]->off += pga[i]->bp_off_diff;
1493         }
1494
1495         if (pa) {
1496                 sptlrpc_enc_pool_put_pages_array(pa, j);
1497                 OBD_FREE_PTR_ARRAY_LARGE(pa, page_count);
1498         }
1499 #endif
1500 }
1501
1502 static int
1503 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1504                      u32 page_count, struct brw_page **pga,
1505                      struct ptlrpc_request **reqp, int resend)
1506 {
1507         struct ptlrpc_request *req;
1508         struct ptlrpc_bulk_desc *desc;
1509         struct ost_body *body;
1510         struct obd_ioobj *ioobj;
1511         struct niobuf_remote *niobuf;
1512         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1513         struct osc_brw_async_args *aa;
1514         struct req_capsule *pill;
1515         struct brw_page *pg_prev;
1516         void *short_io_buf;
1517         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1518         struct inode *inode = NULL;
1519         bool directio = false;
1520         bool gpu = false;
1521         bool enable_checksum = true;
1522         struct cl_page *clpage;
1523
1524         ENTRY;
1525         if (pga[0]->pg) {
1526                 clpage = oap2cl_page(brw_page2oap(pga[0]));
1527                 inode = clpage->cp_inode;
1528                 if (clpage->cp_type == CPT_TRANSIENT)
1529                         directio = true;
1530         }
1531         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1532                 RETURN(-ENOMEM); /* Recoverable */
1533         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1534                 RETURN(-EINVAL); /* Fatal */
1535
1536         if ((cmd & OBD_BRW_WRITE) != 0) {
1537                 opc = OST_WRITE;
1538                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1539                                                 osc_rq_pool,
1540                                                 &RQF_OST_BRW_WRITE);
1541         } else {
1542                 opc = OST_READ;
1543                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1544         }
1545         if (req == NULL)
1546                 RETURN(-ENOMEM);
1547
1548         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode) &&
1549             llcrypt_has_encryption_key(inode)) {
1550                 struct page **pa = NULL;
1551
1552 #ifdef CONFIG_LL_ENCRYPTION
1553                 OBD_ALLOC_PTR_ARRAY_LARGE(pa, page_count);
1554                 if (pa == NULL) {
1555                         ptlrpc_request_free(req);
1556                         RETURN(-ENOMEM);
1557                 }
1558
1559                 rc = sptlrpc_enc_pool_get_pages_array(pa, page_count);
1560                 if (rc) {
1561                         CDEBUG(D_SEC, "failed to allocate from enc pool: %d\n",
1562                                rc);
1563                         ptlrpc_request_free(req);
1564                         RETURN(rc);
1565                 }
1566 #endif
1567
1568                 for (i = 0; i < page_count; i++) {
1569                         struct brw_page *brwpg = pga[i];
1570                         struct page *data_page = NULL;
1571                         bool retried = false;
1572                         bool lockedbymyself;
1573                         u32 nunits = (brwpg->off & ~PAGE_MASK) + brwpg->count;
1574                         struct address_space *map_orig = NULL;
1575                         pgoff_t index_orig;
1576
1577 retry_encrypt:
1578                         nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
1579                         /* The page can already be locked when we arrive here.
1580                          * This is possible when cl_page_assume/vvp_page_assume
1581                          * is stuck on wait_on_page_writeback with page lock
1582                          * held. In this case there is no risk for the lock to
1583                          * be released while we are doing our encryption
1584                          * processing, because writeback against that page will
1585                          * only end in vvp_page_completion_write/
1586                          * cl_page_completion once the page is fully processed.
1587                          */
1588                         lockedbymyself = trylock_page(brwpg->pg);
1589                         if (directio) {
1590                                 map_orig = brwpg->pg->mapping;
1591                                 brwpg->pg->mapping = inode->i_mapping;
1592                                 index_orig = brwpg->pg->index;
1593                                 clpage = oap2cl_page(brw_page2oap(brwpg));
1594                                 brwpg->pg->index = clpage->cp_page_index;
1595                         }
1596                         data_page =
1597                                 osc_encrypt_pagecache_blocks(brwpg->pg,
1598                                                             pa ? pa[i] : NULL,
1599                                                             nunits, 0,
1600                                                             GFP_NOFS);
1601                         if (directio) {
1602                                 brwpg->pg->mapping = map_orig;
1603                                 brwpg->pg->index = index_orig;
1604                         }
1605                         if (lockedbymyself)
1606                                 unlock_page(brwpg->pg);
1607                         if (IS_ERR(data_page)) {
1608                                 rc = PTR_ERR(data_page);
1609                                 if (rc == -ENOMEM && !retried) {
1610                                         retried = true;
1611                                         rc = 0;
1612                                         goto retry_encrypt;
1613                                 }
1614                                 if (pa) {
1615                                         sptlrpc_enc_pool_put_pages_array(pa + i,
1616                                                                 page_count - i);
1617                                         OBD_FREE_PTR_ARRAY_LARGE(pa,
1618                                                                  page_count);
1619                                 }
1620                                 ptlrpc_request_free(req);
1621                                 RETURN(rc);
1622                         }
1623                         /* Set PageChecked flag on bounce page for
1624                          * disambiguation in osc_release_bounce_pages().
1625                          */
1626                         SetPageChecked(data_page);
1627                         brwpg->pg = data_page;
1628                         /* there should be no gap in the middle of the page array */
1629                         if (i == page_count - 1) {
1630                                 struct osc_async_page *oap =
1631                                         brw_page2oap(brwpg);
1632
1633                                 oa->o_size = oap->oap_count +
1634                                         oap->oap_obj_off + oap->oap_page_off;
1635                         }
1636                         /* len is forced to nunits, and the relative offset
1637                          * to 0, so store the old clear-text values
1638                          */
1639                         brwpg->bp_count_diff = nunits - brwpg->count;
1640                         brwpg->count = nunits;
1641                         brwpg->bp_off_diff = brwpg->off & ~PAGE_MASK;
1642                         brwpg->off = brwpg->off & PAGE_MASK;
1643                 }
1644
1645                 if (pa)
1646                         OBD_FREE_PTR_ARRAY_LARGE(pa, page_count);
1647         } else if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1648                 struct osc_async_page *oap = brw_page2oap(pga[0]);
1649                 struct cl_page *clpage = oap2cl_page(oap);
1650                 struct cl_object *clobj = clpage->cp_obj;
1651                 struct cl_attr attr = { 0 };
1652                 struct lu_env *env;
1653                 __u16 refcheck;
1654
1655                 env = cl_env_get(&refcheck);
1656                 if (IS_ERR(env)) {
1657                         rc = PTR_ERR(env);
1658                         ptlrpc_request_free(req);
1659                         RETURN(rc);
1660                 }
1661
1662                 cl_object_attr_lock(clobj);
1663                 rc = cl_object_attr_get(env, clobj, &attr);
1664                 cl_object_attr_unlock(clobj);
1665                 cl_env_put(env, &refcheck);
1666                 if (rc != 0) {
1667                         ptlrpc_request_free(req);
1668                         RETURN(rc);
1669                 }
1670                 if (attr.cat_size)
1671                         oa->o_size = attr.cat_size;
1672         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode) &&
1673                    llcrypt_has_encryption_key(inode)) {
1674                 for (i = 0; i < page_count; i++) {
1675                         struct brw_page *pg = pga[i];
1676                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1677
1678                         nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
1679                         /* count/off are forced to cover the whole encryption
1680                          * unit size so that all encrypted data is stored on the
1681                          * OST, so adjust bp_{count,off}_diff for the size of
1682                          * the clear text.
1683                          */
1684                         pg->bp_count_diff = nunits - pg->count;
1685                         pg->count = nunits;
1686                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1687                         pg->off = pg->off & PAGE_MASK;
1688                 }
1689         }
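
        /* Worked example of the count/off adjustment above and in the write
         * path (assuming the usual 4096-byte value of
         * LUSTRE_ENCRYPTION_UNIT_SIZE):
         *
         *      clear text:   off = 8192 + 100, count = 200
         *      nunits      = 100 + 200 = 300, rounded up to 4096
         *      bp_count_diff = 4096 - 200 = 3896, count becomes 4096
         *      bp_off_diff   = 100,               off   becomes 8192
         *
         * osc_release_bounce_pages() undoes this by subtracting
         * bp_count_diff from count and adding bp_off_diff back to off.
         */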
1690
1691         for (niocount = i = 1; i < page_count; i++) {
1692                 if (!can_merge_pages(pga[i - 1], pga[i]))
1693                         niocount++;
1694         }
1695
1696         pill = &req->rq_pill;
1697         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1698                              sizeof(*ioobj));
1699         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1700                              niocount * sizeof(*niobuf));
1701
1702         for (i = 0; i < page_count; i++) {
1703                 short_io_size += pga[i]->count;
1704                 if (!inode || !IS_ENCRYPTED(inode) ||
1705                     !llcrypt_has_encryption_key(inode)) {
1706                         pga[i]->bp_count_diff = 0;
1707                         pga[i]->bp_off_diff = 0;
1708                 }
1709         }
1710
1711         if (brw_page2oap(pga[0])->oap_brw_flags & OBD_BRW_RDMA_ONLY) {
1712                 enable_checksum = false;
1713                 short_io_size = 0;
1714                 gpu = 1;
1715         }
1716
1717         /* Check if read/write is small enough to be a short io. */
1718         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1719             !imp_connect_shortio(cli->cl_import))
1720                 short_io_size = 0;
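
        /* For instance (hypothetical numbers): a single contiguous 3 KiB
         * write with cl_max_short_io_bytes = 16 KiB on a short-io capable
         * import keeps short_io_size != 0 and is copied inline into the
         * request buffer below; a 1 MiB write takes the bulk descriptor
         * path instead.
         */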
1721
1722         /* If this is an empty RPC to an old server, just ignore it */
1723         if (!short_io_size && !pga[0]->pg) {
1724                 ptlrpc_request_free(req);
1725                 RETURN(-ENODATA);
1726         }
1727
1728         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1729                              opc == OST_READ ? 0 : short_io_size);
1730         if (opc == OST_READ)
1731                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1732                                      short_io_size);
1733
1734         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1735         if (rc) {
1736                 ptlrpc_request_free(req);
1737                 RETURN(rc);
1738         }
1739         osc_set_io_portal(req);
1740
1741         ptlrpc_at_set_req_timeout(req);
1742         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1743          * retry logic */
1744         req->rq_no_retry_einprogress = 1;
1745
1746         if (short_io_size != 0) {
1747                 desc = NULL;
1748                 short_io_buf = NULL;
1749                 goto no_bulk;
1750         }
1751
1752         desc = ptlrpc_prep_bulk_imp(req, page_count,
1753                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1754                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1755                         PTLRPC_BULK_PUT_SINK),
1756                 OST_BULK_PORTAL,
1757                 &ptlrpc_bulk_kiov_pin_ops);
1758
1759         if (desc == NULL)
1760                 GOTO(out, rc = -ENOMEM);
1761         /* NB request now owns desc and will free it when it gets freed */
1762         desc->bd_is_rdma = gpu;
1763 no_bulk:
1764         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1765         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1766         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1767         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1768
1769         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1770
1771         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1772          * and from_kgid(), because they are asynchronous. Fortunately, variable
1773          * oa contains valid o_uid and o_gid in these two operations.
1774          * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
1775          * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid breaking
1776          * other processing logic */
1777         body->oa.o_uid = oa->o_uid;
1778         body->oa.o_gid = oa->o_gid;
1779
1780         obdo_to_ioobj(oa, ioobj);
1781         ioobj->ioo_bufcnt = niocount;
1782         /* The high bits of ioo_max_brw tell the server the _maximum_ number of
1783          * bulks that might be sent for this request.  The actual number is
1784          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1785          * sends "max - 1" to stay compatible with old clients sending "0", and
1786          * also so the actual maximum is a power-of-two number, not one less. LU-1431 */
1787         if (desc != NULL)
1788                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1789         else /* short io */
1790                 ioobj_max_brw_set(ioobj, 0);
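
        /* Illustration of the "max - 1" encoding (assuming the usual
         * lustre_idl.h helpers, where the biased count lives in the high
         * 16 bits of ioo_max_brw):
         *
         *      ioobj_max_brw_set(ioobj, 4)  stores 3 in the high bits
         *      ioobj_max_brw_get(ioobj)     returns 3 + 1 = 4
         *      old client sending "0"       reads back as a maximum of 1
         */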
1791
1792         if (inode && IS_ENCRYPTED(inode) &&
1793             llcrypt_has_encryption_key(inode) &&
1794             !OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_ENCFLAG)) {
1795                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1796                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1797                         body->oa.o_flags = 0;
1798                 }
1799                 body->oa.o_flags |= LUSTRE_ENCRYPT_FL;
1800         }
1801
1802         if (short_io_size != 0) {
1803                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1804                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1805                         body->oa.o_flags = 0;
1806                 }
1807                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1808                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1809                        short_io_size);
1810                 if (opc == OST_WRITE) {
1811                         short_io_buf = req_capsule_client_get(pill,
1812                                                               &RMF_SHORT_IO);
1813                         LASSERT(short_io_buf != NULL);
1814                 }
1815         }
1816
1817         LASSERT(page_count > 0);
1818         pg_prev = pga[0];
1819         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1820                 struct brw_page *pg = pga[i];
1821                 int poff = pg->off & ~PAGE_MASK;
1822
1823                 LASSERT(pg->count > 0);
1824                 /* make sure there is no gap in the middle of the page array */
1825                 LASSERTF(page_count == 1 ||
1826                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1827                           ergo(i > 0 && i < page_count - 1,
1828                                poff == 0 && pg->count == PAGE_SIZE)   &&
1829                           ergo(i == page_count - 1, poff == 0)),
1830                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1831                          i, page_count, pg, pg->off, pg->count);
1832                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1833                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1834                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1835                          i, page_count,
1836                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1837                          pg_prev->pg, page_private(pg_prev->pg),
1838                          pg_prev->pg->index, pg_prev->off);
1839                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1840                         (pg->flag & OBD_BRW_SRVLOCK));
1841                 if (short_io_size != 0 && opc == OST_WRITE) {
1842                         unsigned char *ptr = kmap_atomic(pg->pg);
1843
1844                         LASSERT(short_io_size >= requested_nob + pg->count);
1845                         memcpy(short_io_buf + requested_nob,
1846                                ptr + poff,
1847                                pg->count);
1848                         kunmap_atomic(ptr);
1849                 } else if (short_io_size == 0) {
1850                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1851                                                          pg->count);
1852                 }
1853                 requested_nob += pg->count;
1854
1855                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1856                         niobuf--;
1857                         niobuf->rnb_len += pg->count;
1858                 } else {
1859                         niobuf->rnb_offset = pg->off;
1860                         niobuf->rnb_len    = pg->count;
1861                         niobuf->rnb_flags  = pg->flag;
1862                 }
1863                 pg_prev = pg;
1864         }
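
        /* Example of the merging above (assuming 4096-byte pages): three
         * contiguous pages at file offsets 0, 4096 and 8192 collapse into
         * one remote niobuf { rnb_offset = 0, rnb_len = 12288 }, while a
         * gap between pages or differing brw flags starts a new niobuf --
         * the same condition niocount was counting further up.
         */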
1865
1866         LASSERTF((void *)(niobuf - niocount) ==
1867                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1868                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1869                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1870
1871         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1872         if (resend) {
1873                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1874                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1875                         body->oa.o_flags = 0;
1876                 }
1877                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1878         }
1879
1880         if (osc_should_shrink_grant(cli))
1881                 osc_shrink_grant_local(cli, &body->oa);
1882
1883         if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
1884                 enable_checksum = false;
1885
1886         /* size[REQ_REC_OFF] still sizeof (*body) */
1887         if (opc == OST_WRITE) {
1888                 if (enable_checksum) {
1889                         /* store cl_cksum_type in a local variable since
1890                          * it can be changed via lprocfs */
1891                         enum cksum_types cksum_type = cli->cl_cksum_type;
1892
1893                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1894                                 body->oa.o_flags = 0;
1895
1896                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1897                                                                 cksum_type);
1898                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1899
1900                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1901                                                   requested_nob, page_count,
1902                                                   pga, OST_WRITE,
1903                                                   &body->oa.o_cksum, resend);
1904                         if (rc < 0) {
1905                                 CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
1906                                        rc);
1907                                 GOTO(out, rc);
1908                         }
1909                         CDEBUG(D_PAGE | (resend ? D_HA : 0),
1910                                "checksum at write origin: %x (%x)\n",
1911                                body->oa.o_cksum, cksum_type);
1912
1913                         /* save this in 'oa', too, for later checking */
1914                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1915                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1916                                                            cksum_type);
1917                 } else {
1918                         /* clear out the checksum flag, in case this is a
1919                          * resend but cl_checksum is no longer set. b=11238 */
1920                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1921                 }
1922                 oa->o_cksum = body->oa.o_cksum;
1923                 /* 1 RC per niobuf */
1924                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1925                                      sizeof(__u32) * niocount);
1926         } else {
1927                 if (enable_checksum) {
1928                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1929                                 body->oa.o_flags = 0;
1930                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1931                                 cli->cl_cksum_type);
1932                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1933                 }
1934
1935                 /* Client cksum has already been copied to the wire obdo by the
1936                  * earlier lustre_set_wire_obdo(); in case a bulk-read is being
1937                  * resent due to a cksum error, this allows the server to
1938                  * check+dump pages on its side */
1939         }
1940         ptlrpc_request_set_replen(req);
1941
1942         aa = ptlrpc_req_async_args(aa, req);
1943         aa->aa_oa = oa;
1944         aa->aa_requested_nob = requested_nob;
1945         aa->aa_nio_count = niocount;
1946         aa->aa_page_count = page_count;
1947         aa->aa_resends = 0;
1948         aa->aa_ppga = pga;
1949         aa->aa_cli = cli;
1950         INIT_LIST_HEAD(&aa->aa_oaps);
1951
1952         *reqp = req;
1953         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1954         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1955                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1956                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1957         RETURN(0);
1958
1959  out:
1960         ptlrpc_req_finished(req);
1961         RETURN(rc);
1962 }
1963
1964 char dbgcksum_file_name[PATH_MAX];
1965
1966 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1967                                 struct brw_page **pga, __u32 server_cksum,
1968                                 __u32 client_cksum)
1969 {
1970         struct file *filp;
1971         int rc, i;
1972         unsigned int len;
1973         char *buf;
1974
1975         /* only keep a dump of the pages on the first error for the same range
1976          * in the file/fid, not during the resends/retries. */
1977         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1978                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1979                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1980                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1981                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1982                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1983                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1984                  pga[0]->off,
1985                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1986                  client_cksum, server_cksum);
1987         CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1988         filp = filp_open(dbgcksum_file_name,
1989                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1990         if (IS_ERR(filp)) {
1991                 rc = PTR_ERR(filp);
1992                 if (rc == -EEXIST)
1993                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1994                                "checksum error: rc = %d\n", dbgcksum_file_name,
1995                                rc);
1996                 else
1997                         CERROR("%s: can't open to dump pages with checksum "
1998                                "error: rc = %d\n", dbgcksum_file_name, rc);
1999                 return;
2000         }
2001
2002         for (i = 0; i < page_count; i++) {
2003                 len = pga[i]->count;
2004                 buf = kmap(pga[i]->pg);
2005                 while (len != 0) {
2006                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
2007                         if (rc < 0) {
2008                                 CERROR("%s: wanted to write %u but got %d "
2009                                        "error\n", dbgcksum_file_name, len, rc);
2010                                 break;
2011                         }
2012                         len -= rc;
2013                         buf += rc;
2014                 }
2015                 kunmap(pga[i]->pg);
2016         }
2017
2018         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
2019         if (rc)
2020                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
2021         filp_close(filp, NULL);
2022
2023         libcfs_debug_dumplog();
2024 }
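
/* Example of a resulting dump file name (illustrative values only):
 *
 *   /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x1:0x0]:[0-1048575]-a1b2c3d4-e5f60718
 *
 * i.e. debug file path, parent FID, byte range of the dump, then the
 * client and server checksums as computed at failure time.  The O_EXCL
 * open above is what keeps only the first dump for a given range.
 */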
2025
2026 static int
2027 check_write_checksum(struct obdo *oa, const struct lnet_processid *peer,
2028                      __u32 client_cksum, __u32 server_cksum,
2029                      struct osc_brw_async_args *aa)
2030 {
2031         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
2032         enum cksum_types cksum_type;
2033         obd_dif_csum_fn *fn = NULL;
2034         int sector_size = 0;
2035         __u32 new_cksum;
2036         char *msg;
2037         int rc;
2038
2039         if (server_cksum == client_cksum) {
2040                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2041                 return 0;
2042         }
2043
2044         if (aa->aa_cli->cl_checksum_dump)
2045                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
2046                                     server_cksum, client_cksum);
2047
2048         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
2049                                            oa->o_flags : 0);
2050
2051         switch (cksum_type) {
2052         case OBD_CKSUM_T10IP512:
2053                 fn = obd_dif_ip_fn;
2054                 sector_size = 512;
2055                 break;
2056         case OBD_CKSUM_T10IP4K:
2057                 fn = obd_dif_ip_fn;
2058                 sector_size = 4096;
2059                 break;
2060         case OBD_CKSUM_T10CRC512:
2061                 fn = obd_dif_crc_fn;
2062                 sector_size = 512;
2063                 break;
2064         case OBD_CKSUM_T10CRC4K:
2065                 fn = obd_dif_crc_fn;
2066                 sector_size = 4096;
2067                 break;
2068         default:
2069                 break;
2070         }
2071
2072         if (fn)
2073                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
2074                                              aa->aa_page_count, aa->aa_ppga,
2075                                              OST_WRITE, fn, sector_size,
2076                                              &new_cksum, true);
2077         else
2078                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
2079                                        aa->aa_ppga, OST_WRITE, cksum_type,
2080                                        &new_cksum);
2081
2082         if (rc < 0)
2083                 msg = "failed to calculate the client write checksum";
2084         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
2085                 msg = "the server did not use the checksum type specified in "
2086                       "the original request - likely a protocol problem";
2087         else if (new_cksum == server_cksum)
2088                 msg = "changed on the client after we checksummed it - "
2089                       "likely false positive due to mmap IO (bug 11742)";
2090         else if (new_cksum == client_cksum)
2091                 msg = "changed in transit before arrival at OST";
2092         else
2093                 msg = "changed in transit AND doesn't match the original - "
2094                       "likely false positive due to mmap IO (bug 11742)";
2095
2096         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
2097                            DFID " object "DOSTID" extent [%llu-%llu], original "
2098                            "client csum %x (type %x), server csum %x (type %x),"
2099                            " client csum now %x\n",
2100                            obd_name, msg, libcfs_nidstr(&peer->nid),
2101                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
2102                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
2103                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
2104                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
2105                            aa->aa_ppga[aa->aa_page_count - 1]->off +
2106                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
2107                            client_cksum,
2108                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
2109                            server_cksum, cksum_type, new_cksum);
2110         return 1;
2111 }
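
/* The diagnosis above boils down to recomputing the checksum locally and
 * comparing the three values (original client csum, server csum, and the
 * freshly recomputed one):
 *
 *      new == server  -> data changed on the client after checksumming
 *      new == client  -> data changed in transit before reaching the OST
 *      otherwise      -> changed in transit and no longer matches the
 *                        original
 *
 * Returning 1 makes the caller fail the write with -EAGAIN so that it is
 * resent.
 */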
2112
2113 /* Note rc enters this function as the number of bytes transferred */
2114 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
2115 {
2116         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
2117         struct client_obd *cli = aa->aa_cli;
2118         const char *obd_name = cli->cl_import->imp_obd->obd_name;
2119         const struct lnet_processid *peer =
2120                 &req->rq_import->imp_connection->c_peer;
2121         struct ost_body *body;
2122         u32 client_cksum = 0;
2123         struct inode *inode = NULL;
2124         unsigned int blockbits = 0, blocksize = 0;
2125         struct cl_page *clpage;
2126
2127         ENTRY;
2128
2129         if (rc < 0 && rc != -EDQUOT) {
2130                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
2131                 RETURN(rc);
2132         }
2133
2134         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
2135         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
2136         if (body == NULL) {
2137                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
2138                 RETURN(-EPROTO);
2139         }
2140
2141         /* set/clear the over-quota flag for a uid/gid/projid */
2142         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
2143             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
2144                 unsigned qid[LL_MAXQUOTAS] = {
2145                                          body->oa.o_uid, body->oa.o_gid,
2146                                          body->oa.o_projid };
2147                 CDEBUG(D_QUOTA,
2148                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
2149                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
2150                        body->oa.o_valid, body->oa.o_flags);
2151                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
2152                                 body->oa.o_flags);
2153         }
2154
2155         osc_update_grant(cli, body);
2156
2157         if (rc < 0)
2158                 RETURN(rc);
2159
2160         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
2161                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
2162
2163         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2164                 if (rc > 0) {
2165                         CERROR("%s: unexpected positive size %d\n",
2166                                obd_name, rc);
2167                         RETURN(-EPROTO);
2168                 }
2169
2170                 if (req->rq_bulk != NULL &&
2171                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
2172                         RETURN(-EAGAIN);
2173
2174                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
2175                     check_write_checksum(&body->oa, peer, client_cksum,
2176                                          body->oa.o_cksum, aa))
2177                         RETURN(-EAGAIN);
2178
2179                 rc = check_write_rcs(req, aa->aa_requested_nob,
2180                                      aa->aa_nio_count, aa->aa_page_count,
2181                                      aa->aa_ppga);
2182                 GOTO(out, rc);
2183         }
2184
2185         /* The rest of this function executes only for OST_READs */
2186
2187         if (req->rq_bulk == NULL) {
2188                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
2189                                           RCL_SERVER);
2190                 LASSERT(rc == req->rq_status);
2191         } else {
2192                 /* if unwrap_bulk failed, return -EAGAIN to retry */
2193                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
2194         }
2195         if (rc < 0)
2196                 GOTO(out, rc = -EAGAIN);
2197
2198         if (rc > aa->aa_requested_nob) {
2199                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
2200                        rc, aa->aa_requested_nob);
2201                 RETURN(-EPROTO);
2202         }
2203
2204         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2205                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2206                        rc, req->rq_bulk->bd_nob_transferred);
2207                 RETURN(-EPROTO);
2208         }
2209
2210         if (req->rq_bulk == NULL) {
2211                 /* short io */
2212                 int nob, pg_count, i = 0;
2213                 unsigned char *buf;
2214
2215                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2216                 pg_count = aa->aa_page_count;
2217                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2218                                                    rc);
2219                 nob = rc;
2220                 while (nob > 0 && pg_count > 0) {
2221                         unsigned char *ptr;
2222                         int count = aa->aa_ppga[i]->count > nob ?
2223                                     nob : aa->aa_ppga[i]->count;
2224
2225                         CDEBUG(D_CACHE, "page %p count %d\n",
2226                                aa->aa_ppga[i]->pg, count);
2227                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2228                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2229                                count);
2230                         kunmap_atomic((void *) ptr);
2231
2232                         buf += count;
2233                         nob -= count;
2234                         i++;
2235                         pg_count--;
2236                 }
2237         }
2238
2239         if (rc < aa->aa_requested_nob)
2240                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2241
2242         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2243                 static int cksum_counter;
2244                 u32 server_cksum = body->oa.o_cksum;
2245                 int nob = rc;
2246                 char *via = "";
2247                 char *router = "";
2248                 enum cksum_types cksum_type;
2249                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2250                         body->oa.o_flags : 0;
2251
2252                 cksum_type = obd_cksum_type_unpack(o_flags);
2253                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2254                                           aa->aa_page_count, aa->aa_ppga,
2255                                           OST_READ, &client_cksum, false);
2256                 if (rc < 0)
2257                         GOTO(out, rc);
2258
2259                 if (req->rq_bulk != NULL &&
2260                     !nid_same(&peer->nid, &req->rq_bulk->bd_sender)) {
2261                         via = " via ";
2262                         router = libcfs_nidstr(&req->rq_bulk->bd_sender);
2263                 }
2264
2265                 if (server_cksum != client_cksum) {
2266                         struct ost_body *clbody;
2267                         __u32 client_cksum2;
2268                         u32 page_count = aa->aa_page_count;
2269
2270                         osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2271                                              page_count, aa->aa_ppga,
2272                                              OST_READ, &client_cksum2, true);
2273                         clbody = req_capsule_client_get(&req->rq_pill,
2274                                                         &RMF_OST_BODY);
2275                         if (cli->cl_checksum_dump)
2276                                 dump_all_bulk_pages(&clbody->oa, page_count,
2277                                                     aa->aa_ppga, server_cksum,
2278                                                     client_cksum);
2279
2280                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2281                                            "%s%s%s inode "DFID" object "DOSTID
2282                                            " extent [%llu-%llu], client %x/%x, "
2283                                            "server %x, cksum_type %x\n",
2284                                            obd_name,
2285                                            libcfs_nidstr(&peer->nid),
2286                                            via, router,
2287                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2288                                                 clbody->oa.o_parent_seq : 0ULL,
2289                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2290                                                 clbody->oa.o_parent_oid : 0,
2291                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2292                                                 clbody->oa.o_parent_ver : 0,
2293                                            POSTID(&body->oa.o_oi),
2294                                            aa->aa_ppga[0]->off,
2295                                            aa->aa_ppga[page_count-1]->off +
2296                                            aa->aa_ppga[page_count-1]->count - 1,
2297                                            client_cksum, client_cksum2,
2298                                            server_cksum, cksum_type);
2299                         cksum_counter = 0;
2300                         aa->aa_oa->o_cksum = client_cksum;
2301                         rc = -EAGAIN;
2302                 } else {
2303                         cksum_counter++;
2304                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2305                         rc = 0;
2306                 }
2307         } else if (unlikely(client_cksum)) {
2308                 static int cksum_missed;
2309
2310                 cksum_missed++;
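                /* log only when cksum_missed is a power of two, i.e. with
                 * exponential backoff (x & -x isolates the lowest set bit,
                 * so the test holds exactly for powers of two) */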
2311                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2312                         CERROR("%s: checksum %u requested from %s but not sent\n",
2313                                obd_name, cksum_missed,
2314                                libcfs_nidstr(&peer->nid));
2315         } else {
2316                 rc = 0;
2317         }
2318
2319         /* get the inode from the first cl_page */
2320         clpage = oap2cl_page(brw_page2oap(aa->aa_ppga[0]));
2321         inode = clpage->cp_inode;
2322         if (clpage->cp_type == CPT_TRANSIENT && inode) {
2323                 blockbits = inode->i_blkbits;
2324                 blocksize = 1 << blockbits;
2325         }
2326         if (inode && IS_ENCRYPTED(inode)) {
2327                 int idx;
2328
2329                 if (!llcrypt_has_encryption_key(inode)) {
2330                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2331                         GOTO(out, rc);
2332                 }
2333                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2334                         struct brw_page *brwpg = aa->aa_ppga[idx];
2335                         unsigned int offs = 0;
2336
2337                         while (offs < PAGE_SIZE) {
2338                                 /* do not decrypt if page is all 0s */
2339                                 if (memchr_inv(page_address(brwpg->pg) + offs,
2340                                       0, LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2341                                         /* if page is empty forward info to
2342                                          * upper layers (ll_io_zero_page) by
2343                                          * clearing PagePrivate2
2344                                          */
2345                                         if (!offs)
2346                                                 ClearPagePrivate2(brwpg->pg);
2347                                         break;
2348                                 }
2349
2350                                 if (blockbits) {
2351                                         /* This is direct IO case. Directly call
2352                                          * decrypt function that takes inode as
2353                                          * input parameter. Page does not need
2354                                          * to be locked.
2355                                          */
2356                                         u64 lblk_num;
2357                                         unsigned int i;
2358
2359                                         clpage =
2360                                                oap2cl_page(brw_page2oap(brwpg));
2361                                         lblk_num =
2362                                                 ((u64)(clpage->cp_page_index) <<
2363                                                 (PAGE_SHIFT - blockbits)) +
2364                                                 (offs >> blockbits);
2365                                         for (i = offs;
2366                                              i < offs +
2367                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2368                                              i += blocksize, lblk_num++) {
2369                                                 rc =
2370                                                   llcrypt_decrypt_block_inplace(
2371                                                           inode, brwpg->pg,
2372                                                           blocksize, i,
2373                                                           lblk_num);
2374                                                 if (rc)
2375                                                         break;
2376                                         }
2377                                 } else {
2378                                         rc = llcrypt_decrypt_pagecache_blocks(
2379                                                 brwpg->pg,
2380                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2381                                                 offs);
2382                                 }
2383                                 if (rc)
2384                                         GOTO(out, rc);
2385
2386                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2387                         }
2388                 }
2389         }
2390
2391 out:
2392         if (rc >= 0)
2393                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2394                                      aa->aa_oa, &body->oa);
2395
2396         RETURN(rc);
2397 }
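
/* Sketch of the read-side decryption flow above (assuming 4096-byte pages
 * and the usual LUSTRE_ENCRYPTION_UNIT_SIZE of 4096, i.e. one unit per
 * page):
 *
 *      for each brw_page, for each encryption unit:
 *        unit is all zeroes -> hole: skip it, and clear PagePrivate2 if
 *                              the whole page is empty
 *        direct IO          -> llcrypt_decrypt_block_inplace() per block,
 *                              with the inode passed explicitly
 *        buffered IO        -> llcrypt_decrypt_pagecache_blocks()
 */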
2398
2399 static int osc_brw_redo_request(struct ptlrpc_request *request,
2400                                 struct osc_brw_async_args *aa, int rc)
2401 {
2402         struct ptlrpc_request *new_req;
2403         struct osc_brw_async_args *new_aa;
2404         struct osc_async_page *oap;
2405         ENTRY;
2406
2407         /* The below message is checked in replay-ost-single.sh test_8ae */
2408         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2409                   "redo for recoverable error %d", rc);
2410
2411         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2412                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2413                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2414                                   aa->aa_ppga, &new_req, 1);
2415         if (rc)
2416                 RETURN(rc);
2417
2418         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2419                 if (oap->oap_request != NULL) {
2420                         LASSERTF(request == oap->oap_request,
2421                                  "request %p != oap_request %p\n",
2422                                  request, oap->oap_request);
2423                 }
2424         }
2425         /* The new request takes over pga and oaps from the old request.
2426          * Note that copying a list_head doesn't work: it has to be moved,
2427          * hence the list_splice_init() calls below.
2428          */
2429         aa->aa_resends++;
2430         new_req->rq_interpret_reply = request->rq_interpret_reply;
2431         new_req->rq_async_args = request->rq_async_args;
2432         new_req->rq_commit_cb = request->rq_commit_cb;
2433         /* cap the resend delay to the current request timeout; this is similar
2434          * to what ptlrpc does (see after_reply()) */
2435         if (aa->aa_resends > new_req->rq_timeout)
2436                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2437         else
2438                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
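
        /* e.g. with rq_timeout = 30 s the delay above grows linearly with
         * the resend count, 1 s, 2 s, 3 s, ..., and is capped at 30 s once
         * aa_resends exceeds the request timeout. */
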
2439         new_req->rq_generation_set = 1;
2440         new_req->rq_import_generation = request->rq_import_generation;
2441
2442         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2443
2444         INIT_LIST_HEAD(&new_aa->aa_oaps);
2445         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2446         INIT_LIST_HEAD(&new_aa->aa_exts);
2447         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2448         new_aa->aa_resends = aa->aa_resends;
2449
2450         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2451                 if (oap->oap_request) {
2452                         ptlrpc_req_finished(oap->oap_request);
2453                         oap->oap_request = ptlrpc_request_addref(new_req);
2454                 }
2455         }
2456
2457         /* XXX: This code will run into problems if we ever support adding
2458          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2459          * waiting for all of them to finish. We should inherit the request
2460          * set from the old request. */
2461         ptlrpcd_add_req(new_req);
2462
2463         DEBUG_REQ(D_INFO, new_req, "new request");
2464         RETURN(0);
2465 }
2466
2467 /*
2468  * ugh, we want disk allocation on the target to happen in offset order.  we'll
2469  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2470  * fine for our small page arrays and doesn't require allocation.  it's an
2471  * insertion sort that swaps elements that are strides apart, shrinking the
2472  * stride down until it's '1' and the array is sorted.
2473  */
2474 static void sort_brw_pages(struct brw_page **array, int num)
2475 {
2476         int stride, i, j;
2477         struct brw_page *tmp;
2478
2479         if (num == 1)
2480                 return;
2481         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2482                 ;
2483
2484         do {
2485                 stride /= 3;
2486                 for (i = stride ; i < num ; i++) {
2487                         tmp = array[i];
2488                         j = i;
2489                         while (j >= stride && array[j - stride]->off > tmp->off) {
2490                                 array[j] = array[j - stride];
2491                                 j -= stride;
2492                         }
2493                         array[j] = tmp;
2494                 }
2495         } while (stride > 1);
2496 }
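
/* Worked example of the stride selection above: for num = 100 the
 * 3*stride + 1 loop yields 1, 4, 13, 40, 121 and stops at 121; the first
 * sorting pass then runs with stride 121/3 = 40, then 13, then 4, and
 * finally 1, where it degenerates into plain insertion sort on an
 * almost-sorted array.
 */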
2497
2498 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2499 {
2500         LASSERT(ppga != NULL);
2501         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2502 }
2503
2504 static int brw_interpret(const struct lu_env *env,
2505                          struct ptlrpc_request *req, void *args, int rc)
2506 {
2507         struct osc_brw_async_args *aa = args;
2508         struct osc_extent *ext;
2509         struct osc_extent *tmp;
2510         struct client_obd *cli = aa->aa_cli;
2511         unsigned long transferred = 0;
2512
2513         ENTRY;
2514
2515         rc = osc_brw_fini_request(req, rc);
2516         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2517
2518         /* restore clear text pages */
2519         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2520
2521         /*
2522          * When server returns -EINPROGRESS, client should always retry
2523          * regardless of the number of times the bulk was resent already.
2524          */
2525         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2526                 if (req->rq_import_generation !=
2527                     req->rq_import->imp_generation) {
2528                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2529                                ""DOSTID", rc = %d.\n",
2530                                req->rq_import->imp_obd->obd_name,
2531                                POSTID(&aa->aa_oa->o_oi), rc);
2532                 } else if (rc == -EINPROGRESS ||
2533                            client_should_resend(aa->aa_resends, aa->aa_cli)) {
2534                         rc = osc_brw_redo_request(req, aa, rc);
2535                 } else {
2536                         CERROR("%s: too many resent retries for object: "
2537                                "%llu:%llu, rc = %d.\n",
2538                                req->rq_import->imp_obd->obd_name,
2539                                POSTID(&aa->aa_oa->o_oi), rc);
2540                 }
2541
2542                 if (rc == 0)
2543                         RETURN(0);
2544                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2545                         rc = -EIO;
2546         }
2547
2548         if (rc == 0) {
2549                 struct obdo *oa = aa->aa_oa;
2550                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2551                 unsigned long valid = 0;
2552                 struct cl_object *obj;
2553                 struct osc_async_page *last;
2554
2555                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2556                 obj = osc2cl(last->oap_obj);
2557
2558                 cl_object_attr_lock(obj);
2559                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2560                         attr->cat_blocks = oa->o_blocks;
2561                         valid |= CAT_BLOCKS;
2562                 }
2563                 if (oa->o_valid & OBD_MD_FLMTIME) {
2564                         attr->cat_mtime = oa->o_mtime;
2565                         valid |= CAT_MTIME;
2566                 }
2567                 if (oa->o_valid & OBD_MD_FLATIME) {
2568                         attr->cat_atime = oa->o_atime;
2569                         valid |= CAT_ATIME;
2570                 }
2571                 if (oa->o_valid & OBD_MD_FLCTIME) {
2572                         attr->cat_ctime = oa->o_ctime;
2573                         valid |= CAT_CTIME;
2574                 }
2575
2576                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2577                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2578                         loff_t last_off = last->oap_count + last->oap_obj_off +
2579                                 last->oap_page_off;
2580
2581                         /* Change the file size if this is an out-of-quota or
2582                          * direct IO write and it extends the file size */
2583                         if (loi->loi_lvb.lvb_size < last_off) {
2584                                 attr->cat_size = last_off;
2585                                 valid |= CAT_SIZE;
2586                         }
2587                         /* Extend KMS if it's not a lockless write */
2588                         if (loi->loi_kms < last_off &&
2589                             oap2osc_page(last)->ops_srvlock == 0) {
2590                                 attr->cat_kms = last_off;
2591                                 valid |= CAT_KMS;
2592                         }
2593                 }
2594
2595                 if (valid != 0)
2596                         cl_object_attr_update(env, obj, attr, valid);
2597                 cl_object_attr_unlock(obj);
2598         }
2599         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2600         aa->aa_oa = NULL;
2601
2602         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2603                 osc_inc_unstable_pages(req);
2604
2605         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2606                 list_del_init(&ext->oe_link);
2607                 osc_extent_finish(env, ext, 1,
2608                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2609         }
2610         LASSERT(list_empty(&aa->aa_exts));
2611         LASSERT(list_empty(&aa->aa_oaps));
2612
2613         transferred = (req->rq_bulk == NULL ? /* short io */
2614                        aa->aa_requested_nob :
2615                        req->rq_bulk->bd_nob_transferred);
2616
2617         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2618         ptlrpc_lprocfs_brw(req, transferred);
2619
2620         spin_lock(&cli->cl_loi_list_lock);
2621         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2622          * is called so we know whether to go to sync BRWs or wait for more
2623          * RPCs to complete */
2624         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2625                 cli->cl_w_in_flight--;
2626         else
2627                 cli->cl_r_in_flight--;
2628         osc_wake_cache_waiters(cli);
2629         spin_unlock(&cli->cl_loi_list_lock);
2630
2631         osc_io_unplug(env, cli, NULL);
2632         RETURN(rc);
2633 }
2634
2635 static void brw_commit(struct ptlrpc_request *req)
2636 {
2637         /* If osc_inc_unstable_pages() (via osc_extent_finish()) races with
2638          * this function, called via rq_commit_cb, we need to ensure
2639          * osc_dec_unstable_pages() is still called. Otherwise unstable
2640          * pages may be leaked. */
2641         spin_lock(&req->rq_lock);
2642         if (likely(req->rq_unstable)) {
2643                 req->rq_unstable = 0;
2644                 spin_unlock(&req->rq_lock);
2645
2646                 osc_dec_unstable_pages(req);
2647         } else {
2648                 req->rq_committed = 1;
2649                 spin_unlock(&req->rq_lock);
2650         }
2651 }
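/*
 * Illustrative interleaving sketch (a reading of the code above and of
 * brw_interpret(), assuming osc_inc_unstable_pages() checks rq_committed
 * under rq_lock on its side):
 *
 *   Case A: brw_interpret() runs first
 *     osc_inc_unstable_pages(req)  sets rq_unstable = 1
 *     brw_commit(req)              sees rq_unstable, clears it and calls
 *                                  osc_dec_unstable_pages(req)
 *
 *   Case B: the commit callback runs first
 *     brw_commit(req)              rq_unstable is still 0, so it only
 *                                  sets rq_committed = 1
 *     osc_inc_unstable_pages(req)  sees rq_committed and undoes its own
 *                                  accounting
 *
 * Either way exactly one side performs the decrement, so the unstable
 * page count cannot leak.
 */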
2652
2653 /**
2654  * Build an RPC from the list of extents @ext_list. The caller must ensure
2655  * that the total number of pages in this list does not exceed the max
2656  * pages per RPC. Extents in the list must be in OES_RPC state.
2657  */
2658 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2659                   struct list_head *ext_list, int cmd)
2660 {
2661         struct ptlrpc_request           *req = NULL;
2662         struct osc_extent               *ext;
2663         struct brw_page                 **pga = NULL;
2664         struct osc_brw_async_args       *aa = NULL;
2665         struct obdo                     *oa = NULL;
2666         struct osc_async_page           *oap;
2667         struct osc_object               *obj = NULL;
2668         struct cl_req_attr              *crattr = NULL;
2669         loff_t                          starting_offset = OBD_OBJECT_EOF;
2670         loff_t                          ending_offset = 0;
2671         /* '1' for consistency with code that checks !mpflag to restore */
2672         int mpflag = 1;
2673         int                             mem_tight = 0;
2674         int                             page_count = 0;
2675         bool                            soft_sync = false;
2676         bool                            ndelay = false;
2677         int                             i;
2678         int                             grant = 0;
2679         int                             rc;
2680         __u32                           layout_version = 0;
2681         LIST_HEAD(rpc_list);
2682         struct ost_body                 *body;
2683         ENTRY;
2684         LASSERT(!list_empty(ext_list));
2685
2686         /* add pages into rpc_list to build BRW rpc */
2687         list_for_each_entry(ext, ext_list, oe_link) {
2688                 LASSERT(ext->oe_state == OES_RPC);
2689                 mem_tight |= ext->oe_memalloc;
2690                 grant += ext->oe_grants;
2691                 page_count += ext->oe_nr_pages;
2692                 layout_version = max(layout_version, ext->oe_layout_version);
2693                 if (obj == NULL)
2694                         obj = ext->oe_obj;
2695         }
2696
2697         soft_sync = osc_over_unstable_soft_limit(cli);
2698         if (mem_tight)
2699                 mpflag = memalloc_noreclaim_save();
2700
2701         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2702         if (pga == NULL)
2703                 GOTO(out, rc = -ENOMEM);
2704
2705         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2706         if (oa == NULL)
2707                 GOTO(out, rc = -ENOMEM);
2708
2709         i = 0;
2710         list_for_each_entry(ext, ext_list, oe_link) {
2711                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2712                         if (mem_tight)
2713                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2714                         if (soft_sync)
2715                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2716                         pga[i] = &oap->oap_brw_page;
2717                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2718                         i++;
2719
2720                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2721                         if (starting_offset == OBD_OBJECT_EOF ||
2722                             starting_offset > oap->oap_obj_off)
2723                                 starting_offset = oap->oap_obj_off;
2724                         else
2725                                 LASSERT(oap->oap_page_off == 0);
2726                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2727                                 ending_offset = oap->oap_obj_off +
2728                                                 oap->oap_count;
2729                         else
2730                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2731                                         PAGE_SIZE);
2732                 }
2733                 if (ext->oe_ndelay)
2734                         ndelay = true;
2735         }
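        /*
         * Note the invariant checked by the LASSERTs above: except for the
         * page with the lowest object offset, every page must start at
         * in-page offset 0, and except for the page that extends
         * ending_offset, every page must run to the end of PAGE_SIZE.
         * In other words, one BRW RPC covers a single contiguous byte
         * range, with at most partial first and last pages.
         */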
2736
2737         /* first page in the list */
2738         oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2739
2740         crattr = &osc_env_info(env)->oti_req_attr;
2741         memset(crattr, 0, sizeof(*crattr));
2742         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2743         crattr->cra_flags = ~0ULL;
2744         crattr->cra_page = oap2cl_page(oap);
2745         crattr->cra_oa = oa;
2746         cl_req_attr_set(env, osc2cl(obj), crattr);
2747
2748         if (cmd == OBD_BRW_WRITE) {
2749                 oa->o_grant_used = grant;
2750                 if (layout_version > 0) {
2751                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2752                                PFID(&oa->o_oi.oi_fid), layout_version);
2753
2754                         oa->o_layout_version = layout_version;
2755                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2756                 }
2757         }
2758
2759         sort_brw_pages(pga, page_count);
2760         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2761         if (rc != 0) {
2762                 CERROR("prep_req failed: %d\n", rc);
2763                 GOTO(out, rc);
2764         }
2765
2766         req->rq_commit_cb = brw_commit;
2767         req->rq_interpret_reply = brw_interpret;
2768         req->rq_memalloc = mem_tight != 0;
2769         oap->oap_request = ptlrpc_request_addref(req);
2770         if (ndelay) {
2771                 req->rq_no_resend = req->rq_no_delay = 1;
2772                 /* We should probably set a shorter timeout value here
2773                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2774                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2775         }
2776
2777         /* Need to update the timestamps after the request is built in case
2778          * we race with setattr (locally or in the queue at the OST).  If the
2779          * OST gets a later setattr before an earlier BRW (as determined by
2780          * the request xid), the OST will not use the BRW timestamps.  Sadly,
2781          * there is no obvious way to do this in a single call.  bug 10150 */
2782         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2783         crattr->cra_oa = &body->oa;
2784         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2785         cl_req_attr_set(env, osc2cl(obj), crattr);
2786         lustre_msg_set_uid_gid(req->rq_reqmsg, &crattr->cra_uid,
2787                                &crattr->cra_gid);
2788         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2789
2790         aa = ptlrpc_req_async_args(aa, req);
2791         INIT_LIST_HEAD(&aa->aa_oaps);
2792         list_splice_init(&rpc_list, &aa->aa_oaps);
2793         INIT_LIST_HEAD(&aa->aa_exts);
2794         list_splice_init(ext_list, &aa->aa_exts);
2795
2796         spin_lock(&cli->cl_loi_list_lock);
2797         starting_offset >>= PAGE_SHIFT;
2798         ending_offset >>= PAGE_SHIFT;
2799         if (cmd == OBD_BRW_READ) {
2800                 cli->cl_r_in_flight++;
2801                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2802                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2803                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2804                                       starting_offset + 1);
2805         } else {
2806                 cli->cl_w_in_flight++;
2807                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2808                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2809                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2810                                       starting_offset + 1);
2811         }
2812         spin_unlock(&cli->cl_loi_list_lock);
2813
2814         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2815                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2816         if (libcfs_debug & D_IOTRACE) {
2817                 struct lu_fid fid;
2818
2819                 fid.f_seq = crattr->cra_oa->o_parent_seq;
2820                 fid.f_oid = crattr->cra_oa->o_parent_oid;
2821                 fid.f_ver = crattr->cra_oa->o_parent_ver;
2822                 CDEBUG(D_IOTRACE,
2823                        DFID": %d %s pages, start %lld, end %lld, now %ur/%uw in flight\n",
2824                        PFID(&fid), page_count,
2825                        cmd == OBD_BRW_READ ? "read" : "write", starting_offset,
2826                        ending_offset, cli->cl_r_in_flight, cli->cl_w_in_flight);
2827         }
2828         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2829
2830         ptlrpcd_add_req(req);
2831         rc = 0;
2832         EXIT;
2833
2834 out:
2835         if (mem_tight)
2836                 memalloc_noreclaim_restore(mpflag);
2837
2838         if (rc != 0) {
2839                 LASSERT(req == NULL);
2840
2841                 if (oa)
2842                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2843                 if (pga) {
2844                         osc_release_bounce_pages(pga, page_count);
2845                         osc_release_ppga(pga, page_count);
2846                 }
2847                 /* this should happen rarely and is pretty bad; it makes the
2848                  * pending list not follow the dirty order
2849                  */
2850                 while ((ext = list_first_entry_or_null(ext_list,
2851                                                        struct osc_extent,
2852                                                        oe_link)) != NULL) {
2853                         list_del_init(&ext->oe_link);
2854                         osc_extent_finish(env, ext, 0, rc);
2855                 }
2856         }
2857         RETURN(rc);
2858 }
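/*
 * Minimal caller sketch for osc_build_rpc() (illustrative only; the real
 * callers live in osc_cache.c).  The contract from the comment above is
 * that every extent is already in OES_RPC state and the page total stays
 * within the per-RPC limit:
 *
 *	LIST_HEAD(rpc_list);
 *
 *	// move prepared extents (OES_RPC, within max pages per RPC)
 *	// onto rpc_list, then:
 *	rc = osc_build_rpc(env, cli, &rpc_list, OBD_BRW_WRITE);
 *	if (rc < 0)
 *		CERROR("failed to build BRW RPC: rc = %d\n", rc);
 */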
2859
2860 /* This is to refresh our lock in the face of no RPCs. */
2861 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2862 {
2863         struct ptlrpc_request *req;
2864         struct obdo oa;
2865         struct brw_page bpg = { .off = start, .count = 1};
2866         struct brw_page *pga = &bpg;
2867         int rc;
2868
2869         memset(&oa, 0, sizeof(oa));
2870         oa.o_oi = osc->oo_oinfo->loi_oi;
2871         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2872         /* For updated servers - don't do a read */
2873         oa.o_flags = OBD_FL_NORPC;
2874
2875         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2876                                   &req, 0);
2877
2878         /* If we succeeded, ship it off; if not, there's no point in doing
2879          * anything. Also no resends.
2880          * No interpret callback, no commit callback.
2881          */
2882         if (!rc) {
2883                 req->rq_no_resend = 1;
2884                 ptlrpcd_add_req(req);
2885         }
2886 }
2887
2888 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2889 {
2890         int set = 0;
2891
2892         LASSERT(lock != NULL);
2893
2894         lock_res_and_lock(lock);
2895
2896         if (lock->l_ast_data == NULL)
2897                 lock->l_ast_data = data;
2898         if (lock->l_ast_data == data)
2899                 set = 1;
2900
2901         unlock_res_and_lock(lock);
2902
2903         return set;
2904 }
2905
2906 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2907                      void *cookie, struct lustre_handle *lockh,
2908                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2909                      int errcode)
2910 {
2911         bool intent = *flags & LDLM_FL_HAS_INTENT;
2912         int rc;
2913         ENTRY;
2914
2915         /* The request was created before ldlm_cli_enqueue call. */
2916         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2917                 struct ldlm_reply *rep;
2918
2919                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2920                 LASSERT(rep != NULL);
2921
2922                 rep->lock_policy_res1 =
2923                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2924                 if (rep->lock_policy_res1)
2925                         errcode = rep->lock_policy_res1;
2926                 if (!speculative)
2927                         *flags |= LDLM_FL_LVB_READY;
2928         } else if (errcode == ELDLM_OK) {
2929                 *flags |= LDLM_FL_LVB_READY;
2930         }
2931
2932         /* Call the update callback. */
2933         rc = (*upcall)(cookie, lockh, errcode);
2934
2935         /* release the reference taken in ldlm_cli_enqueue() */
2936         if (errcode == ELDLM_LOCK_MATCHED)
2937                 errcode = ELDLM_OK;
2938         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2939                 ldlm_lock_decref(lockh, mode);
2940
2941         RETURN(rc);
2942 }
2943
2944 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2945                           void *args, int rc)
2946 {
2947         struct osc_enqueue_args *aa = args;
2948         struct ldlm_lock *lock;
2949         struct lustre_handle *lockh = &aa->oa_lockh;
2950         enum ldlm_mode mode = aa->oa_mode;
2951         struct ost_lvb *lvb = aa->oa_lvb;
2952         __u32 lvb_len = sizeof(*lvb);
2953         __u64 flags = 0;
2954         struct ldlm_enqueue_info einfo = {
2955                 .ei_type = aa->oa_type,
2956                 .ei_mode = mode,
2957         };
2958
2959         ENTRY;
2960
2961         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2962          * be valid. */
2963         lock = ldlm_handle2lock(lockh);
2964         LASSERTF(lock != NULL,
2965                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2966                  lockh->cookie, req, aa);
2967
2968         /* Take an additional reference so that a blocking AST that
2969          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2970          * to arrive after an upcall has been executed by
2971          * osc_enqueue_fini(). */
2972         ldlm_lock_addref(lockh, mode);
2973
2974         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2975         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2976
2977         /* Let the CP AST grant the lock first. */
2978         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2979
2980         if (aa->oa_speculative) {
2981                 LASSERT(aa->oa_lvb == NULL);
2982                 LASSERT(aa->oa_flags == NULL);
2983                 aa->oa_flags = &flags;
2984         }
2985
2986         /* Complete obtaining the lock procedure. */
2987         rc = ldlm_cli_enqueue_fini(aa->oa_exp, &req->rq_pill, &einfo, 1,
2988                                    aa->oa_flags, lvb, lvb_len, lockh, rc,
2989                                    false);
2990         /* Complete osc stuff. */
2991         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2992                               aa->oa_flags, aa->oa_speculative, rc);
2993
2994         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2995
2996         ldlm_lock_decref(lockh, mode);
2997         LDLM_LOCK_PUT(lock);
2998         RETURN(rc);
2999 }
3000
3001 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
3002  * lock from the 2nd OSC before a lock from the 1st one. This does not
3003  * deadlock with other synchronous requests; however, keeping some locks while
3004  * trying to obtain others may take a considerable amount of time in the case
3005  * of OST failure, and when a client does not release locks that other sync
3006  * requests are waiting for, it is evicted from the cluster -- such scenarios
3007  * make life difficult, so release locks just after they are obtained. */
3008 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3009                      __u64 *flags, union ldlm_policy_data *policy,
3010                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
3011                      void *cookie, struct ldlm_enqueue_info *einfo,
3012                      struct ptlrpc_request_set *rqset, int async,
3013                      bool speculative)
3014 {
3015         struct obd_device *obd = exp->exp_obd;
3016         struct lustre_handle lockh = { 0 };
3017         struct ptlrpc_request *req = NULL;
3018         int intent = *flags & LDLM_FL_HAS_INTENT;
3019         __u64 search_flags = *flags;
3020         __u64 match_flags = 0;
3021         enum ldlm_mode mode;
3022         int rc;
3023         ENTRY;
3024
3025         /* Filesystem lock extents are extended to page boundaries so that
3026          * dealing with the page cache is a little smoother.  */
3027         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
3028         policy->l_extent.end |= ~PAGE_MASK;
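        /*
         * Worked example for the rounding above (4KiB pages, so
         * ~PAGE_MASK == 0xfff): a byte range [5000, 6000] becomes
         * [4096, 8191], i.e. the requested extent is widened to full
         * page boundaries.
         */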
3029
3030         /* Next, search for already existing extent locks that will cover us */
3031         /* If we're trying to read, we also search for an existing PW lock.  The
3032          * VFS and page cache already protect us locally, so lots of readers/
3033          * writers can share a single PW lock.
3034          *
3035          * There are problems with conversion deadlocks, so instead of
3036          * converting a read lock to a write lock, we'll just enqueue a new
3037          * one.
3038          *
3039          * At some point we should cancel the read lock instead of making the
3040          * server send us a blocking callback, but there are problems with
3041          * canceling locks out from under other users right now, too. */
3042         mode = einfo->ei_mode;
3043         if (einfo->ei_mode == LCK_PR)
3044                 mode |= LCK_PW;
3045         /* Normal lock requests must wait for the LVB to be ready before
3046          * matching a lock; speculative lock requests do not need to,
3047          * because they will not actually use the lock. */
3048         if (!speculative)
3049                 search_flags |= LDLM_FL_LVB_READY;
3050         if (intent != 0)
3051                 search_flags |= LDLM_FL_BLOCK_GRANTED;
3052         if (mode == LCK_GROUP)
3053                 match_flags = LDLM_MATCH_GROUP;
3054         mode = ldlm_lock_match_with_skip(obd->obd_namespace, search_flags, 0,
3055                                          res_id, einfo->ei_type, policy, mode,
3056                                          &lockh, match_flags);
3057         if (mode) {
3058                 struct ldlm_lock *matched;
3059
3060                 if (*flags & LDLM_FL_TEST_LOCK)
3061                         RETURN(ELDLM_OK);
3062
3063                 matched = ldlm_handle2lock(&lockh);
3064                 if (speculative) {
3065                         /* This DLM lock request is speculative, and does not
3066                          * have an associated IO request. Therefore if there
3067                          * is already a DLM lock, it will just inform the
3068                          * caller to cancel the request for this stripe. */
3069                         lock_res_and_lock(matched);
3070                         if (ldlm_extent_equal(&policy->l_extent,
3071                             &matched->l_policy_data.l_extent))
3072                                 rc = -EEXIST;
3073                         else
3074                                 rc = -ECANCELED;
3075                         unlock_res_and_lock(matched);
3076
3077                         ldlm_lock_decref(&lockh, mode);
3078                         LDLM_LOCK_PUT(matched);
3079                         RETURN(rc);
3080                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
3081                         *flags |= LDLM_FL_LVB_READY;
3082
3083                         /* We already have a lock, and it's referenced. */
3084                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
3085
3086                         ldlm_lock_decref(&lockh, mode);
3087                         LDLM_LOCK_PUT(matched);
3088                         RETURN(ELDLM_OK);
3089                 } else {
3090                         ldlm_lock_decref(&lockh, mode);
3091                         LDLM_LOCK_PUT(matched);
3092                 }
3093         }
3094
3095         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
3096                 RETURN(-ENOLCK);
3097
3098         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3099         *flags &= ~LDLM_FL_BLOCK_GRANTED;
3100
3101         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3102                               sizeof(*lvb), LVB_T_OST, &lockh, async);
3103         if (async) {
3104                 if (!rc) {
3105                         struct osc_enqueue_args *aa;
3106                         aa = ptlrpc_req_async_args(aa, req);
3107                         aa->oa_exp         = exp;
3108                         aa->oa_mode        = einfo->ei_mode;
3109                         aa->oa_type        = einfo->ei_type;
3110                         lustre_handle_copy(&aa->oa_lockh, &lockh);
3111                         aa->oa_upcall      = upcall;
3112                         aa->oa_cookie      = cookie;
3113                         aa->oa_speculative = speculative;
3114                         if (!speculative) {
3115                                 aa->oa_flags  = flags;
3116                                 aa->oa_lvb    = lvb;
3117                         } else {
3118                                 /* speculative locks essentially enqueue a
3119                                  * DLM lock in advance, so we don't care
3120                                  * about the result of the enqueue. */
3121                                 aa->oa_lvb    = NULL;
3122                                 aa->oa_flags  = NULL;
3123                         }
3124
3125                         req->rq_interpret_reply = osc_enqueue_interpret;
3126                         ptlrpc_set_add_req(rqset, req);
3127                 }
3128                 RETURN(rc);
3129         }
3130
3131         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
3132                               flags, speculative, rc);
3133
3134         RETURN(rc);
3135 }
3136
3137 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
3138                    struct ldlm_res_id *res_id, enum ldlm_type type,
3139                    union ldlm_policy_data *policy, enum ldlm_mode mode,
3140                    __u64 *flags, struct osc_object *obj,
3141                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
3142 {
3143         struct obd_device *obd = exp->exp_obd;
3144         __u64 lflags = *flags;
3145         enum ldlm_mode rc;
3146         ENTRY;
3147
3148         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3149                 RETURN(-EIO);
3150
3151         /* Filesystem lock extents are extended to page boundaries so that
3152          * dealing with the page cache is a little smoother */
3153         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
3154         policy->l_extent.end |= ~PAGE_MASK;
3155
3156         /* Next, search for already existing extent locks that will cover us */
3157         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
3158                                         res_id, type, policy, mode, lockh,
3159                                         match_flags);
3160         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
3161                 RETURN(rc);
3162
3163         if (obj != NULL) {
3164                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3165
3166                 LASSERT(lock != NULL);
3167                 if (osc_set_lock_data(lock, obj)) {
3168                         lock_res_and_lock(lock);
3169                         if (!ldlm_is_lvb_cached(lock)) {
3170                                 LASSERT(lock->l_ast_data == obj);
3171                                 osc_lock_lvb_update(env, obj, lock, NULL);
3172                                 ldlm_set_lvb_cached(lock);
3173                         }
3174                         unlock_res_and_lock(lock);
3175                 } else {
3176                         ldlm_lock_decref(lockh, rc);
3177                         rc = 0;
3178                 }
3179                 LDLM_LOCK_PUT(lock);
3180         }
3181         RETURN(rc);
3182 }
3183
3184 static int osc_statfs_interpret(const struct lu_env *env,
3185                                 struct ptlrpc_request *req, void *args, int rc)
3186 {
3187         struct osc_async_args *aa = args;
3188         struct obd_statfs *msfs;
3189
3190         ENTRY;
3191         if (rc == -EBADR)
3192                 /*
3193                  * The request has in fact never been sent due to issues at
3194                  * a higher level (LOV).  Exit immediately since the caller
3195                  * is aware of the problem and takes care of the clean up.
3196                  */
3197                 RETURN(rc);
3198
3199         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3200             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3201                 GOTO(out, rc = 0);
3202
3203         if (rc != 0)
3204                 GOTO(out, rc);
3205
3206         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3207         if (msfs == NULL)
3208                 GOTO(out, rc = -EPROTO);
3209
3210         *aa->aa_oi->oi_osfs = *msfs;
3211 out:
3212         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3213
3214         RETURN(rc);
3215 }
3216
3217 static int osc_statfs_async(struct obd_export *exp,
3218                             struct obd_info *oinfo, time64_t max_age,
3219                             struct ptlrpc_request_set *rqset)
3220 {
3221         struct obd_device     *obd = class_exp2obd(exp);
3222         struct ptlrpc_request *req;
3223         struct osc_async_args *aa;
3224         int rc;
3225         ENTRY;
3226
3227         if (obd->obd_osfs_age >= max_age) {
3228                 CDEBUG(D_SUPER,
3229                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3230                        obd->obd_name, &obd->obd_osfs,
3231                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3232                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3233                 spin_lock(&obd->obd_osfs_lock);
3234                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3235                 spin_unlock(&obd->obd_osfs_lock);
3236                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3237                 if (oinfo->oi_cb_up)
3238                         oinfo->oi_cb_up(oinfo, 0);
3239
3240                 RETURN(0);
3241         }
3242
3243         /* We could possibly pass max_age in the request (as an absolute
3244          * timestamp or a "seconds.usec ago") so the target can avoid doing
3245          * extra calls into the filesystem if that isn't necessary (e.g.
3246          * during mount that would help a bit).  Having relative timestamps
3247          * is not so great if request processing is slow, while absolute
3248          * timestamps are not ideal because they need time synchronization. */
3249         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3250         if (req == NULL)
3251                 RETURN(-ENOMEM);
3252
3253         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3254         if (rc) {
3255                 ptlrpc_request_free(req);
3256                 RETURN(rc);
3257         }
3258         ptlrpc_request_set_replen(req);
3259         req->rq_request_portal = OST_CREATE_PORTAL;
3260         ptlrpc_at_set_req_timeout(req);
3261
3262         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3263                 /* procfs requests must not block waiting for the OST, to avoid deadlock */
3264                 req->rq_no_resend = 1;
3265                 req->rq_no_delay = 1;
3266         }
3267
3268         req->rq_interpret_reply = osc_statfs_interpret;
3269         aa = ptlrpc_req_async_args(aa, req);
3270         aa->aa_oi = oinfo;
3271
3272         ptlrpc_set_add_req(rqset, req);
3273         RETURN(0);
3274 }
3275
3276 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3277                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3278 {
3279         struct obd_device     *obd = class_exp2obd(exp);
3280         struct obd_statfs     *msfs;
3281         struct ptlrpc_request *req;
3282         struct obd_import     *imp, *imp0;
3283         int rc;
3284         ENTRY;
3285
3286         /* Since the request might also come from lprocfs, we need to
3287          * sync this with client_disconnect_export(). Bug 15684
3288          */
3289         with_imp_locked(obd, imp0, rc)
3290                 imp = class_import_get(imp0);
3291         if (rc)
3292                 RETURN(rc);
3293
3294         /* We could possibly pass max_age in the request (as an absolute
3295          * timestamp or a "seconds.usec ago") so the target can avoid doing
3296          * extra calls into the filesystem if that isn't necessary (e.g.
3297          * during mount that would help a bit).  Having relative timestamps
3298          * is not so great if request processing is slow, while absolute
3299          * timestamps are not ideal because they need time synchronization. */
3300         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3301
3302         class_import_put(imp);
3303
3304         if (req == NULL)
3305                 RETURN(-ENOMEM);
3306
3307         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3308         if (rc) {
3309                 ptlrpc_request_free(req);
3310                 RETURN(rc);
3311         }
3312         ptlrpc_request_set_replen(req);
3313         req->rq_request_portal = OST_CREATE_PORTAL;
3314         ptlrpc_at_set_req_timeout(req);
3315
3316         if (flags & OBD_STATFS_NODELAY) {
3317                 /* procfs requests must not block waiting for the OST, to avoid deadlock */
3318                 req->rq_no_resend = 1;
3319                 req->rq_no_delay = 1;
3320         }
3321
3322         rc = ptlrpc_queue_wait(req);
3323         if (rc)
3324                 GOTO(out, rc);
3325
3326         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3327         if (msfs == NULL)
3328                 GOTO(out, rc = -EPROTO);
3329
3330         *osfs = *msfs;
3331
3332         EXIT;
3333 out:
3334         ptlrpc_req_finished(req);
3335         return rc;
3336 }
3337
3338 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3339                          void *karg, void __user *uarg)
3340 {
3341         struct obd_device *obd = exp->exp_obd;
3342         struct obd_ioctl_data *data;
3343         int rc;
3344
3345         ENTRY;
3346         CDEBUG(D_IOCTL, "%s: cmd=%x len=%u karg=%pK uarg=%pK\n",
3347                obd->obd_name, cmd, len, karg, uarg);
3348
3349         if (!try_module_get(THIS_MODULE)) {
3350                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3351                        module_name(THIS_MODULE));
3352                 RETURN(-EINVAL);
3353         }
3354
3355         switch (cmd) {
3356         case OBD_IOC_CLIENT_RECOVER:
3357                 if (unlikely(karg == NULL)) {
3358                         OBD_IOC_ERROR(obd->obd_name, cmd, "karg=NULL",
3359                                       rc = -EINVAL);
3360                         break;
3361                 }
3362                 data = karg;
3363                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3364                                            data->ioc_inlbuf1, 0);
3365                 if (rc > 0)
3366                         rc = 0;
3367                 break;
3368         case OBD_IOC_GETATTR:
3369                 if (unlikely(karg == NULL)) {
3370                         OBD_IOC_ERROR(obd->obd_name, cmd, "karg=NULL",
3371                                       rc = -EINVAL);
3372                         break;
3373                 }
3374                 data = karg;
3375                 rc = obd_getattr(NULL, exp, &data->ioc_obdo1);
3376                 break;
3377 #ifdef IOC_OSC_SET_ACTIVE
3378         case_OBD_IOC_DEPRECATED_FT(IOC_OSC_SET_ACTIVE, obd->obd_name, 2, 17);
3379 #endif
3380         case OBD_IOC_SET_ACTIVE:
3381                 if (unlikely(karg == NULL)) {
3382                         OBD_IOC_ERROR(obd->obd_name, cmd, "karg=NULL",
3383                                       rc = -EINVAL);
3384                         break;
3385                 }
3386                 data = karg;
3387                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3388                                               data->ioc_offset);
3389                 break;
3390         default:
3391                 rc = OBD_IOC_DEBUG(D_IOCTL, obd->obd_name, cmd, "unrecognized",
3392                                    -ENOTTY);
3393                 break;
3394         }
3395
3396         module_put(THIS_MODULE);
3397         return rc;
3398 }
3399
3400 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3401                        u32 keylen, void *key, u32 vallen, void *val,
3402                        struct ptlrpc_request_set *set)
3403 {
3404         struct ptlrpc_request *req;
3405         struct obd_device     *obd = exp->exp_obd;
3406         struct obd_import     *imp = class_exp2cliimp(exp);
3407         char                  *tmp;
3408         int                    rc;
3409         ENTRY;
3410
3411         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3412
3413         if (KEY_IS(KEY_CHECKSUM)) {
3414                 if (vallen != sizeof(int))
3415                         RETURN(-EINVAL);
3416                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3417                 RETURN(0);
3418         }
3419
3420         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3421                 sptlrpc_conf_client_adapt(obd);
3422                 RETURN(0);
3423         }
3424
3425         if (KEY_IS(KEY_FLUSH_CTX)) {
3426                 sptlrpc_import_flush_my_ctx(imp);
3427                 RETURN(0);
3428         }
3429
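        /*
         * Worked example for the block below (illustrative numbers): with
         * 1000 pages in the LRU list and a caller target of 300, we offer
         * half the list (nr = 500), shrink min(500, 300) = 300 pages and,
         * if osc_lru_shrink() manages to free them all, return a remaining
         * target of 0 to the caller through *val.
         */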
3430         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3431                 struct client_obd *cli = &obd->u.cli;
3432                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3433                 long target = *(long *)val;
3434
3435                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3436                 *(long *)val -= nr;
3437                 RETURN(0);
3438         }
3439
3440         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3441                 RETURN(-EINVAL);
3442
3443         /* We pass all other commands directly to the OST. Since nobody calls
3444            osc methods directly and everybody is supposed to go through LOV,
3445            we assume LOV checked for invalid values for us.
3446            The only recognised values so far are evict_by_nid and mds_conn.
3447            Even if something bad goes through, we'd get a -EINVAL from the
3448            OST anyway. */
3449
3450         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3451                                                 &RQF_OST_SET_GRANT_INFO :
3452                                                 &RQF_OBD_SET_INFO);
3453         if (req == NULL)
3454                 RETURN(-ENOMEM);
3455
3456         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3457                              RCL_CLIENT, keylen);
3458         if (!KEY_IS(KEY_GRANT_SHRINK))
3459                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3460                                      RCL_CLIENT, vallen);
3461         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3462         if (rc) {
3463                 ptlrpc_request_free(req);
3464                 RETURN(rc);
3465         }
3466
3467         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3468         memcpy(tmp, key, keylen);
3469         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3470                                                         &RMF_OST_BODY :
3471                                                         &RMF_SETINFO_VAL);
3472         memcpy(tmp, val, vallen);
3473
3474         if (KEY_IS(KEY_GRANT_SHRINK)) {
3475                 struct osc_grant_args *aa;
3476                 struct obdo *oa;
3477
3478                 aa = ptlrpc_req_async_args(aa, req);
3479                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3480                 if (!oa) {
3481                         ptlrpc_req_finished(req);
3482                         RETURN(-ENOMEM);
3483                 }
3484                 *oa = ((struct ost_body *)val)->oa;
3485                 aa->aa_oa = oa;
3486                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3487         }
3488
3489         ptlrpc_request_set_replen(req);
3490         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3491                 LASSERT(set != NULL);
3492                 ptlrpc_set_add_req(set, req);
3493                 ptlrpc_check_set(NULL, set);
3494         } else {
3495                 ptlrpcd_add_req(req);
3496         }
3497
3498         RETURN(0);
3499 }
3500 EXPORT_SYMBOL(osc_set_info_async);
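/*
 * Illustrative caller sketch (hypothetical values; real callers pass the
 * KEY_* strings declared in the obd headers).  E.g. toggling checksums,
 * which is handled synchronously above without building an RPC:
 *
 *	int on = 1;
 *
 *	rc = osc_set_info_async(env, exp, sizeof(KEY_CHECKSUM),
 *				KEY_CHECKSUM, sizeof(on), &on, NULL);
 *
 * Keys that must reach the OST (e.g. KEY_GRANT_SHRINK) are instead packed
 * into an OST_SET_INFO request and queued to ptlrpcd.
 */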
3501
3502 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3503                   struct obd_device *obd, struct obd_uuid *cluuid,
3504                   struct obd_connect_data *data, void *localdata)
3505 {
3506         struct client_obd *cli = &obd->u.cli;
3507
3508         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3509                 long lost_grant;
3510                 long grant;
3511
3512                 spin_lock(&cli->cl_loi_list_lock);
3513                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3514                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3515                         /* restore ocd_grant_blkbits as client page bits */
3516                         data->ocd_grant_blkbits = PAGE_SHIFT;
3517                         grant += cli->cl_dirty_grant;
3518                 } else {
3519                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3520                 }
3521                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3522                 lost_grant = cli->cl_lost_grant;
3523                 cli->cl_lost_grant = 0;
3524                 spin_unlock(&cli->cl_loi_list_lock);
3525
3526                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3527                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3528                        data->ocd_version, data->ocd_grant, lost_grant);
3529         }
3530
3531         RETURN(0);
3532 }
3533 EXPORT_SYMBOL(osc_reconnect);
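/*
 * Worked example for the grant resend above (illustrative numbers): with
 * cl_avail_grant = 4MiB, cl_reserved_grant = 1MiB and 256 dirty pages on
 * a 4KiB-page client without OBD_CONNECT_GRANT_PARAM, we reconnect asking
 * for 4MiB + 1MiB + 256 * 4KiB = 6MiB of grant; if everything were zero
 * we would fall back to 2 * cli_brw_size(obd).
 */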
3534
3535 int osc_disconnect(struct obd_export *exp)
3536 {
3537         struct obd_device *obd = class_exp2obd(exp);
3538         int rc;
3539
3540         rc = client_disconnect_export(exp);
3541         /**
3542          * Initially we put del_shrink_grant before disconnect_export, but it
3543          * causes the following problem if setup (connect) and cleanup
3544          * (disconnect) are tangled together.
3545          *      connect p1                     disconnect p2
3546          *   ptlrpc_connect_import
3547          *     ...............               class_manual_cleanup
3548          *                                     osc_disconnect
3549          *                                     del_shrink_grant
3550          *   ptlrpc_connect_interpret
3551          *     osc_init_grant
3552          *   add this client to shrink list
3553          *                                      cleanup_osc
3554          * Bang! The grant shrink thread triggers the shrink. BUG18662
3555          */
3556         osc_del_grant_list(&obd->u.cli);
3557         return rc;
3558 }
3559 EXPORT_SYMBOL(osc_disconnect);
3560
3561 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3562                                  struct hlist_node *hnode, void *arg)
3563 {
3564         struct lu_env *env = arg;
3565         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3566         struct ldlm_lock *lock;
3567         struct osc_object *osc = NULL;
3568         ENTRY;
3569
3570         lock_res(res);
3571         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3572                 if (lock->l_ast_data != NULL && osc == NULL) {
3573                         osc = lock->l_ast_data;
3574                         cl_object_get(osc2cl(osc));
3575                 }
3576
3577                 /* clear the LDLM_FL_CLEANED flag to make sure it will be
3578                  * canceled by the 2nd round of the ldlm_namespace_cleanup()
3579                  * call in osc_import_event(). */
3580                 ldlm_clear_cleaned(lock);
3581         }
3582         unlock_res(res);
3583
3584         if (osc != NULL) {
3585                 osc_object_invalidate(env, osc);
3586                 cl_object_put(env, osc2cl(osc));
3587         }
3588
3589         RETURN(0);
3590 }
3591 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3592
3593 static int osc_import_event(struct obd_device *obd,
3594                             struct obd_import *imp,
3595                             enum obd_import_event event)
3596 {
3597         struct client_obd *cli;
3598         int rc = 0;
3599
3600         ENTRY;
3601         LASSERT(imp->imp_obd == obd);
3602
3603         switch (event) {
3604         case IMP_EVENT_DISCON: {
3605                 cli = &obd->u.cli;
3606                 spin_lock(&cli->cl_loi_list_lock);
3607                 cli->cl_avail_grant = 0;
3608                 cli->cl_lost_grant = 0;
3609                 spin_unlock(&cli->cl_loi_list_lock);
3610                 break;
3611         }
3612         case IMP_EVENT_INACTIVE: {
3613                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3614                 break;
3615         }
3616         case IMP_EVENT_INVALIDATE: {
3617                 struct ldlm_namespace *ns = obd->obd_namespace;
3618                 struct lu_env         *env;
3619                 __u16                  refcheck;
3620
3621                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3622
3623                 env = cl_env_get(&refcheck);
3624                 if (!IS_ERR(env)) {
3625                         osc_io_unplug(env, &obd->u.cli, NULL);
3626
3627                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3628                                                  osc_ldlm_resource_invalidate,
3629                                                  env, 0);
3630                         cl_env_put(env, &refcheck);
3631
3632                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3633                 } else
3634                         rc = PTR_ERR(env);
3635                 break;
3636         }
3637         case IMP_EVENT_ACTIVE: {
3638                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3639                 break;
3640         }
3641         case IMP_EVENT_OCD: {
3642                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3643
3644                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3645                         osc_init_grant(&obd->u.cli, ocd);
3646
3647                 /* See bug 7198 */
3648                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3649                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3650
3651                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3652                 break;
3653         }
3654         case IMP_EVENT_DEACTIVATE: {
3655                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3656                 break;
3657         }
3658         case IMP_EVENT_ACTIVATE: {
3659                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3660                 break;
3661         }
3662         default:
3663                 CERROR("Unknown import event %d\n", event);
3664                 LBUG();
3665         }
3666         RETURN(rc);
3667 }
3668
3669 /**
3670  * Determine whether the lock can be canceled before replaying the lock
3671  * during recovery, see bug16774 for detailed information.
3672  * during recovery; see bug 16774 for detailed information.
3673  * \retval zero the lock can't be canceled
3674  * \retval other ok to cancel
3675  */
3676 static int osc_cancel_weight(struct ldlm_lock *lock)
3677 {
3678         /*
3679          * Cancel all unused, granted extent locks.
3680          */
3681         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3682             ldlm_is_granted(lock) &&
3683             osc_ldlm_weigh_ast(lock) == 0)
3684                 RETURN(1);
3685
3686         RETURN(0);
3687 }
3688
3689 static int brw_queue_work(const struct lu_env *env, void *data)
3690 {
3691         struct client_obd *cli = data;
3692
3693         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3694
3695         osc_io_unplug(env, cli, NULL);
3696         RETURN(0);
3697 }
3698
3699 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3700 {
3701         struct client_obd *cli = &obd->u.cli;
3702         void *handler;
3703         int rc;
3704
3705         ENTRY;
3706
3707         rc = ptlrpcd_addref();
3708         if (rc)
3709                 RETURN(rc);
3710
3711         rc = client_obd_setup(obd, lcfg);
3712         if (rc)
3713                 GOTO(out_ptlrpcd, rc);
3714
3715
3716         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3717         if (IS_ERR(handler))
3718                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3719         cli->cl_writeback_work = handler;
3720
3721         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3722         if (IS_ERR(handler))
3723                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3724         cli->cl_lru_work = handler;
3725
3726         rc = osc_quota_setup(obd);
3727         if (rc)
3728                 GOTO(out_ptlrpcd_work, rc);
3729
3730         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3731         cli->cl_root_squash = 0;
3732         osc_update_next_shrink(cli);
3733
3734         RETURN(rc);
3735
3736 out_ptlrpcd_work:
3737         if (cli->cl_writeback_work != NULL) {
3738                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3739                 cli->cl_writeback_work = NULL;
3740         }
3741         if (cli->cl_lru_work != NULL) {
3742                 ptlrpcd_destroy_work(cli->cl_lru_work);
3743                 cli->cl_lru_work = NULL;
3744         }
3745         client_obd_cleanup(obd);
3746 out_ptlrpcd:
3747         ptlrpcd_decref();
3748         RETURN(rc);
3749 }
3750 EXPORT_SYMBOL(osc_setup_common);
3751
3752 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3753 {
3754         struct client_obd *cli = &obd->u.cli;
3755         int                adding;
3756         int                added;
3757         int                req_count;
3758         int                rc;
3759
3760         ENTRY;
3761
3762         rc = osc_setup_common(obd, lcfg);
3763         if (rc < 0)
3764                 RETURN(rc);
3765
3766         rc = osc_tunables_init(obd);
3767         if (rc)
3768                 RETURN(rc);
3769
3770         /*
3771          * We try to control the total number of requests with an upper limit,
3772          * osc_reqpool_maxreqcount. There might be some races that cause
3773          * over-limit allocation, but that is fine.
3774          */
3775         req_count = atomic_read(&osc_pool_req_count);
3776         if (req_count < osc_reqpool_maxreqcount) {
3777                 adding = cli->cl_max_rpcs_in_flight + 2;
3778                 if (req_count + adding > osc_reqpool_maxreqcount)
3779                         adding = osc_reqpool_maxreqcount - req_count;
3780
3781                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3782                 atomic_add(added, &osc_pool_req_count);
3783         }
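        /*
         * Worked example for the refill above (illustrative numbers): with
         * cl_max_rpcs_in_flight = 8 we try to add 8 + 2 = 10 requests; if
         * only 3 slots remain below osc_reqpool_maxreqcount we add 3, and
         * account only what ptlrpc_add_rqs_to_pool() actually allocated.
         */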
3784
3785         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3786
3787         spin_lock(&osc_shrink_lock);
3788         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3789         spin_unlock(&osc_shrink_lock);
3790         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3791         cli->cl_import->imp_idle_debug = D_HA;
3792
3793         RETURN(0);
3794 }
3795
3796 int osc_precleanup_common(struct obd_device *obd)
3797 {
3798         struct client_obd *cli = &obd->u.cli;
3799         ENTRY;
3800
3801         /* LU-464
3802          * for echo client, export may be on zombie list, wait for
3803          * zombie thread to cull it, because cli.cl_import will be
3804          * cleared in client_disconnect_export():
3805          *   class_export_destroy() -> obd_cleanup() ->
3806          *   echo_device_free() -> echo_client_cleanup() ->
3807          *   obd_disconnect() -> osc_disconnect() ->
3808          *   client_disconnect_export()
3809          */
3810         obd_zombie_barrier();
3811         if (cli->cl_writeback_work) {
3812                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3813                 cli->cl_writeback_work = NULL;
3814         }
3815
3816         if (cli->cl_lru_work) {
3817                 ptlrpcd_destroy_work(cli->cl_lru_work);
3818                 cli->cl_lru_work = NULL;
3819         }
3820
3821         obd_cleanup_client_import(obd);
3822         RETURN(0);
3823 }
3824 EXPORT_SYMBOL(osc_precleanup_common);
3825
3826 static int osc_precleanup(struct obd_device *obd)
3827 {
3828         ENTRY;
3829
3830         osc_precleanup_common(obd);
3831
3832         ptlrpc_lprocfs_unregister_obd(obd);
3833         RETURN(0);
3834 }
3835
3836 int osc_cleanup_common(struct obd_device *obd)
3837 {
3838         struct client_obd *cli = &obd->u.cli;
3839         int rc;
3840
3841         ENTRY;
3842
3843         spin_lock(&osc_shrink_lock);
3844         list_del(&cli->cl_shrink_list);
3845         spin_unlock(&osc_shrink_lock);
3846
3847         /* lru cleanup */
3848         if (cli->cl_cache != NULL) {
3849                 LASSERT(refcount_read(&cli->cl_cache->ccc_users) > 0);
3850                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3851                 list_del_init(&cli->cl_lru_osc);
3852                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3853                 cli->cl_lru_left = NULL;
3854                 cl_cache_decref(cli->cl_cache);
3855                 cli->cl_cache = NULL;
3856         }
3857
3858         /* free memory of osc quota cache */
3859         osc_quota_cleanup(obd);
3860
3861         rc = client_obd_cleanup(obd);
3862
3863         ptlrpcd_decref();
3864         RETURN(rc);
3865 }
3866 EXPORT_SYMBOL(osc_cleanup_common);
3867
3868 static const struct obd_ops osc_obd_ops = {
3869         .o_owner                = THIS_MODULE,
3870         .o_setup                = osc_setup,
3871         .o_precleanup           = osc_precleanup,
3872         .o_cleanup              = osc_cleanup_common,
3873         .o_add_conn             = client_import_add_conn,
3874         .o_del_conn             = client_import_del_conn,
3875         .o_connect              = client_connect_import,
3876         .o_reconnect            = osc_reconnect,
3877         .o_disconnect           = osc_disconnect,
3878         .o_statfs               = osc_statfs,
3879         .o_statfs_async         = osc_statfs_async,
3880         .o_create               = osc_create,
3881         .o_destroy              = osc_destroy,
3882         .o_getattr              = osc_getattr,
3883         .o_setattr              = osc_setattr,
3884         .o_iocontrol            = osc_iocontrol,
3885         .o_set_info_async       = osc_set_info_async,
3886         .o_import_event         = osc_import_event,
3887         .o_quotactl             = osc_quotactl,
3888 };
3889
3890 LIST_HEAD(osc_shrink_list);
3891 DEFINE_SPINLOCK(osc_shrink_lock);
3892
3893 #ifdef HAVE_SHRINKER_COUNT
3894 static struct shrinker osc_cache_shrinker = {
3895         .count_objects  = osc_cache_shrink_count,
3896         .scan_objects   = osc_cache_shrink_scan,
3897         .seeks          = DEFAULT_SEEKS,
3898 };
3899 #else
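/*
 * Older kernels expose a single ->shrink() callback instead of separate
 * count/scan methods; emulate the split interface by scanning first and
 * then returning the remaining object count.
 */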
3900 static int osc_cache_shrink(struct shrinker *shrinker,
3901                             struct shrink_control *sc)
3902 {
3903         (void)osc_cache_shrink_scan(shrinker, sc);
3904
3905         return osc_cache_shrink_count(shrinker, sc);
3906 }
3907
3908 static struct shrinker osc_cache_shrinker = {
3909         .shrink   = osc_cache_shrink,
3910         .seeks    = DEFAULT_SEEKS,
3911 };
3912 #endif
3913
3914 static int __init osc_init(void)
3915 {
3916         unsigned int reqpool_size;
3917         unsigned int reqsize;
3918         int rc;
3919         ENTRY;
3920
3921         /* print the address of _any_ initialized kernel symbol from this
3922          * module, to allow debugging with a gdb that doesn't support data
3923          * symbols from modules. */
3924         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3925
3926         rc = lu_kmem_init(osc_caches);
3927         if (rc)
3928                 RETURN(rc);
3929
3930         rc = register_shrinker(&osc_cache_shrinker);
3931         if (rc)
3932                 GOTO(out_kmem, rc);
3933
3934         /* This is obviously too much memory; we only prevent overflow here */
3935         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3936                 GOTO(out_shrinker, rc = -EINVAL);
3937
3938         reqpool_size = osc_reqpool_mem_max << 20;
3939
3940         reqsize = 1;
3941         while (reqsize < OST_IO_MAXREQSIZE)
3942                 reqsize = reqsize << 1;
3943
3944         /*
3945          * We don't enlarge the request count in the OSC pool according to
3946          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3947          * after a normal allocation has failed, so a small OSC pool won't
3948          * cause much performance degradation in most cases.
3949          */
3950         osc_reqpool_maxreqcount = reqpool_size / reqsize;
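        /*
         * Worked example (illustrative sizes): with the default
         * osc_reqpool_mem_max = 5 the pool is 5 << 20 bytes = 5MiB.  If
         * OST_IO_MAXREQSIZE were, say, 48KiB, reqsize would round up to
         * the next power of two, 64KiB, and osc_reqpool_maxreqcount would
         * be (5 << 20) / 65536 = 80 requests.
         */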
3951
3952         atomic_set(&osc_pool_req_count, 0);
3953         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3954                                           ptlrpc_add_rqs_to_pool);
3955
3956         if (osc_rq_pool == NULL)
3957                 GOTO(out_shrinker, rc = -ENOMEM);
3958
3959         rc = osc_start_grant_work();
3960         if (rc != 0)
3961                 GOTO(out_req_pool, rc);
3962
3963         rc = class_register_type(&osc_obd_ops, NULL, true,
3964                                  LUSTRE_OSC_NAME, &osc_device_type);
3965         if (rc < 0)
3966                 GOTO(out_stop_grant, rc);
3967
3968         RETURN(rc);
3969
3970 out_stop_grant:
3971         osc_stop_grant_work();
3972 out_req_pool:
3973         ptlrpc_free_rq_pool(osc_rq_pool);
3974 out_shrinker:
3975         unregister_shrinker(&osc_cache_shrinker);
3976 out_kmem:
3977         lu_kmem_fini(osc_caches);
3978
3979         RETURN(rc);
3980 }
3981
3982 static void __exit osc_exit(void)
3983 {
3984         class_unregister_type(LUSTRE_OSC_NAME);
3985         ptlrpc_free_rq_pool(osc_rq_pool);
3986         osc_stop_grant_work();
3987         unregister_shrinker(&osc_cache_shrinker);
3988         lu_kmem_fini(osc_caches);
3989 }
3990
3991 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3992 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3993 MODULE_VERSION(LUSTRE_VERSION_STRING);
3994 MODULE_LICENSE("GPL");
3995
3996 module_init(osc_init);
3997 module_exit(osc_exit);