lustre/osc/osc_request.c (lustre-release @ 68da59423377d5283df71d2c046503670d9bfa6e)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

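/* Pack @oa into the request body, converting it to the wire obdo format
 * according to the import's connect data. */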
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

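/* Reply interpreter shared by the asynchronous setattr-style RPCs (setattr,
 * punch, fallocate): on success, unpack the reply obdo into sa_oa, then pass
 * the final status to the caller's upcall. */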
static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response. The upcall and cookie
 * may also be NULL in that case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

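/* Create an object on the OST. Nowadays only the echo client comes through
 * here; the FID sequence is asserted below to be an echo sequence. */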
static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked when the RPC has been interpreted
 * @cookie:     Opaque caller data passed back to @upcall
 * @mode:       Operation done on given range.
 *
 * Only block allocation or the standard preallocate operation is supported
 * currently. Other mode flags are not supported yet. ftruncate(2) and
 * truncate(2) are supported via a SETATTR request.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);

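/* Reply interpreter for OST_SYNC: refresh the osc object's blocks attribute
 * from the reply before invoking the caller's upcall. */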
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @oa. Found locks are added into the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes from a case when ELC is not supported originally,
         * when we still want to cancel locks in advance and just cancel them
         * locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

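/* Lock-free throttle on concurrent OST_DESTROY RPCs: optimistically take a
 * slot with atomic_inc_return() and, if the limit is exceeded, give it back.
 * The wake_up() on the release path covers the window where another thread
 * freed a slot between the two atomic operations. */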
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

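/* Fill the cache and grant accounting fields of @oa (o_dirty, o_undirty,
 * o_grant, o_dropped) so that current cache usage and desired grant are
 * piggy-backed to the OST on an outgoing RPC. */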
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and the atomic_inc() are not covered by
                 * a lock, thus they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu cl_lost_grant %lu\n",
               cli_name(cli), oa->o_dirty, oa->o_undirty, oa->o_dropped,
               oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

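/* A client should send a grant shrink request only when shrinking is
 * supported and enabled, the import is fully connected, the shrink deadline
 * is (nearly) due, and the available grant exceeds a single RPC's worth. */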
static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

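/* Periodic worker that walks the registered clients, sends up to
 * GRANT_SHRINK_RPC_BATCH shrink RPCs per pass, and re-arms itself for the
 * earliest pending cl_next_shrink_grant deadline. */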
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}

/**
 * Start the grant work item for returning grant to the server for idle
 * clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we've
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as inflight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is that it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

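/* Sanity-check a BRW_WRITE reply: validate the per-niobuf return codes and
 * the total number of bytes moved by the bulk transfer. */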
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

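/* Two brw_pages can be merged into a single niobuf when they are contiguous
 * and their flags differ only in bits known to be safe to combine. */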
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC |
                                  OBD_BRW_SYS_RESOURCE);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
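/* T10-PI bulk checksum: generate per-sector DIF guard tags for every page
 * with @fn, staging them in a scratch page, and hash the accumulated tags
 * (rather than the data itself) into a single checksum. */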
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum, bool resend)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        CDEBUG(D_PAGE | (resend ? D_HA : 0),
               "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
               guard_number, resend, nob, pg_count);

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The remaining guard slots should be able to hold the
                 * checksums of a whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (unlikely(resend))
                        CDEBUG(D_PAGE | D_HA,
                               "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
                               i, used, pga[i]->off & ~PAGE_MASK, count,
                               (int)(used * sizeof(*guard_start)),
                               guard_start + used_number);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

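/* Compute a whole-bulk checksum over the first @nob bytes of the page array
 * with the hash algorithm selected by @cksum_type. */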
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

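/* Checksum dispatcher: use the T10-PI aware path when @cksum_type maps to a
 * DIF checksum function, otherwise fall back to the plain bulk checksum. */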
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum, bool resend)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum,
                                             resend);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

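/* Undo the effects of encryption before completion: free llcrypt bounce
 * pages and restore the cleartext count/offset saved in bp_count_diff and
 * bp_off_diff. */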
static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}

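/* Build an OST_READ/OST_WRITE BRW RPC for the given page array: allocate
 * the request (from the dedicated pool for writes) and, for encrypted files,
 * substitute llcrypt bounce pages before the pages are attached to it. */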
1389 static int
1390 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1391                      u32 page_count, struct brw_page **pga,
1392                      struct ptlrpc_request **reqp, int resend)
1393 {
1394         struct ptlrpc_request *req;
1395         struct ptlrpc_bulk_desc *desc;
1396         struct ost_body *body;
1397         struct obd_ioobj *ioobj;
1398         struct niobuf_remote *niobuf;
1399         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1400         struct osc_brw_async_args *aa;
1401         struct req_capsule *pill;
1402         struct brw_page *pg_prev;
1403         void *short_io_buf;
1404         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1405         struct inode *inode = NULL;
1406         bool directio = false;
1407         bool enable_checksum = true;
1408
1409         ENTRY;
1410         if (pga[0]->pg) {
1411                 inode = page2inode(pga[0]->pg);
1412                 if (inode == NULL) {
1413                         /* Try to get reference to inode from cl_page if we are
1414                          * dealing with direct IO, as handled pages are not
1415                          * actual page cache pages.
1416                          */
1417                         struct osc_async_page *oap = brw_page2oap(pga[0]);
1418                         struct cl_page *clpage = oap2cl_page(oap);
1419
1420                         inode = clpage->cp_inode;
1421                         if (inode)
1422                                 directio = true;
1423                 }
1424         }
1425         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1426                 RETURN(-ENOMEM); /* Recoverable */
1427         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1428                 RETURN(-EINVAL); /* Fatal */
1429
1430         if ((cmd & OBD_BRW_WRITE) != 0) {
1431                 opc = OST_WRITE;
1432                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1433                                                 osc_rq_pool,
1434                                                 &RQF_OST_BRW_WRITE);
1435         } else {
1436                 opc = OST_READ;
1437                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1438         }
1439         if (req == NULL)
1440                 RETURN(-ENOMEM);
1441
1442         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1443                 for (i = 0; i < page_count; i++) {
1444                         struct brw_page *pg = pga[i];
1445                         struct page *data_page = NULL;
1446                         bool retried = false;
1447                         bool lockedbymyself;
1448                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1449                         struct address_space *map_orig = NULL;
1450                         pgoff_t index_orig;
1451
1452 retry_encrypt:
1453                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1454                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1455                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1456                         /* The page can already be locked when we arrive here.
1457                          * This is possible when cl_page_assume/vvp_page_assume
1458                          * is stuck on wait_on_page_writeback with page lock
1459                          * held. In this case there is no risk of the lock being
1460                          * released while we are doing our encryption processing,
1461                          * because writeback against that page will only end in
1462                          * vvp_page_completion_write/cl_page_completion, i.e.
1463                          * once the page is fully processed.
1464                          */
1465                         lockedbymyself = trylock_page(pg->pg);
1466                         if (directio) {
1467                                 map_orig = pg->pg->mapping;
1468                                 pg->pg->mapping = inode->i_mapping;
1469                                 index_orig = pg->pg->index;
1470                                 pg->pg->index = pg->off >> PAGE_SHIFT;
1471                         }
1472                         data_page =
1473                                 llcrypt_encrypt_pagecache_blocks(pg->pg,
1474                                                                  nunits, 0,
1475                                                                  GFP_NOFS);
1476                         if (directio) {
1477                                 pg->pg->mapping = map_orig;
1478                                 pg->pg->index = index_orig;
1479                         }
1480                         if (lockedbymyself)
1481                                 unlock_page(pg->pg);
1482                         if (IS_ERR(data_page)) {
1483                                 rc = PTR_ERR(data_page);
1484                                 if (rc == -ENOMEM && !retried) {
1485                                         retried = true;
1486                                         rc = 0;
1487                                         goto retry_encrypt;
1488                                 }
1489                                 ptlrpc_request_free(req);
1490                                 RETURN(rc);
1491                         }
1492                         /* Set PageChecked flag on bounce page for
1493                          * disambiguation in osc_release_bounce_pages().
1494                          */
1495                         SetPageChecked(data_page);
1496                         pg->pg = data_page;
1497                         /* there should be no gap in the middle of page array */
1498                         if (i == page_count - 1) {
1499                                 struct osc_async_page *oap = brw_page2oap(pg);
1500
1501                                 oa->o_size = oap->oap_count +
1502                                         oap->oap_obj_off + oap->oap_page_off;
1503                         }
1504                         /* len is forced to nunits, and the relative offset
1505                          * to 0, so store the old clear-text info
1506                          */
1507                         pg->bp_count_diff = nunits - pg->count;
1508                         pg->count = nunits;
1509                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1510                         pg->off = pg->off & PAGE_MASK;
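                        /* Illustration (assuming 4 KiB pages and a 4 KiB
                         * LUSTRE_ENCRYPTION_UNIT_SIZE): a clear-text chunk with
                         * off = 6144 and count = 512 gives nunits = 2048 + 512,
                         * rounded up to 4096; we then send count = 4096 at
                         * off = 4096, and remember bp_count_diff = 3584 and
                         * bp_off_diff = 2048 so the clear-text geometry can be
                         * restored after the RPC completes.
                         */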
1511                 }
1512         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
1513                 for (i = 0; i < page_count; i++) {
1514                         struct brw_page *pg = pga[i];
1515                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1516
1517                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1518                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1519                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1520                         /* count/off are forced to cover the whole encryption
1521                          * unit size so that all encrypted data is stored on
1522                          * the OST; adjust bp_{count,off}_diff for the size of
1523                          * the clear text.
1524                          */
1525                         pg->bp_count_diff = nunits - pg->count;
1526                         pg->count = nunits;
1527                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1528                         pg->off = pg->off & PAGE_MASK;
1529                 }
1530         }
1531
1532         for (niocount = i = 1; i < page_count; i++) {
1533                 if (!can_merge_pages(pga[i - 1], pga[i]))
1534                         niocount++;
1535         }
1536
1537         pill = &req->rq_pill;
1538         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1539                              sizeof(*ioobj));
1540         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1541                              niocount * sizeof(*niobuf));
1542
1543         for (i = 0; i < page_count; i++) {
1544                 short_io_size += pga[i]->count;
1545                 if (!inode || !IS_ENCRYPTED(inode)) {
1546                         pga[i]->bp_count_diff = 0;
1547                         pga[i]->bp_off_diff = 0;
1548                 }
1549         }
1550
1551         if (lnet_is_rdma_only_page(pga[0]->pg)) {
1552                 enable_checksum = false;
1553                 short_io_size = 0;
1554         }
1555
1556         /* Check if read/write is small enough to be a short io. */
1557         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1558             !imp_connect_shortio(cli->cl_import))
1559                 short_io_size = 0;
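        /* A "short io" inlines the data in the RPC message itself (the
         * RMF_SHORT_IO buffer) instead of using a separate LNet bulk
         * transfer, saving a network round trip for small I/Os; larger or
         * multi-niobuf transfers fall back to bulk above.
         */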
1560
1561         /* If this is an empty RPC to an old server, just ignore it */
1562         if (!short_io_size && !pga[0]->pg) {
1563                 ptlrpc_request_free(req);
1564                 RETURN(-ENODATA);
1565         }
1566
1567         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1568                              opc == OST_READ ? 0 : short_io_size);
1569         if (opc == OST_READ)
1570                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1571                                      short_io_size);
1572
1573         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1574         if (rc) {
1575                 ptlrpc_request_free(req);
1576                 RETURN(rc);
1577         }
1578         osc_set_io_portal(req);
1579
1580         ptlrpc_at_set_req_timeout(req);
1581         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1582          * retry logic */
1583         req->rq_no_retry_einprogress = 1;
1584
1585         if (short_io_size != 0) {
1586                 desc = NULL;
1587                 short_io_buf = NULL;
1588                 goto no_bulk;
1589         }
1590
1591         desc = ptlrpc_prep_bulk_imp(req, page_count,
1592                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1593                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1594                         PTLRPC_BULK_PUT_SINK),
1595                 OST_BULK_PORTAL,
1596                 &ptlrpc_bulk_kiov_pin_ops);
1597
1598         if (desc == NULL)
1599                 GOTO(out, rc = -ENOMEM);
1600         /* NB request now owns desc and will free it when it gets freed */
1601 no_bulk:
1602         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1603         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1604         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1605         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1606
1607         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1608
1609         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1610          * and from_kgid(), because they are asynchronous. Fortunately, the oa
1611          * passed in already contains valid o_uid and o_gid for these two
1612          * operations, and filling o_uid and o_gid is enough for nrs-tbf, see
1613          * LU-9658. OBD_MD_FLUID and OBD_MD_FLGID are not set in order to
1614          * avoid breaking other processing logic */
1615         body->oa.o_uid = oa->o_uid;
1616         body->oa.o_gid = oa->o_gid;
1617
1618         obdo_to_ioobj(oa, ioobj);
1619         ioobj->ioo_bufcnt = niocount;
1620         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1621          * of bulks that might be sent for this request.  The actual number is
1622          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1623          * sends "max - 1" for compatibility with old clients sending "0", and
1624          * also so that the actual maximum is a power of two, not one less. LU-1431 */
1625         if (desc != NULL)
1626                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1627         else /* short io */
1628                 ioobj_max_brw_set(ioobj, 0);
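        /* For example (illustrative values): with a 16 MB ocd_brw_size and a
         * 1 MB LNET MTU, bd_md_max_brw is 16, so 15 is encoded in the high
         * bits of ioo_max_brw, and the server recovers the limit by adding
         * one, yielding 16, a power of two. */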
1629
1630         if (short_io_size != 0) {
1631                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1632                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1633                         body->oa.o_flags = 0;
1634                 }
1635                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1636                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1637                        short_io_size);
1638                 if (opc == OST_WRITE) {
1639                         short_io_buf = req_capsule_client_get(pill,
1640                                                               &RMF_SHORT_IO);
1641                         LASSERT(short_io_buf != NULL);
1642                 }
1643         }
1644
1645         LASSERT(page_count > 0);
1646         pg_prev = pga[0];
1647         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1648                 struct brw_page *pg = pga[i];
1649                 int poff = pg->off & ~PAGE_MASK;
1650
1651                 LASSERT(pg->count > 0);
1652                 /* make sure there is no gap in the middle of page array */
1653                 LASSERTF(page_count == 1 ||
1654                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1655                           ergo(i > 0 && i < page_count - 1,
1656                                poff == 0 && pg->count == PAGE_SIZE)   &&
1657                           ergo(i == page_count - 1, poff == 0)),
1658                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1659                          i, page_count, pg, pg->off, pg->count);
1660                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1661                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1662                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1663                          i, page_count,
1664                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1665                          pg_prev->pg, page_private(pg_prev->pg),
1666                          pg_prev->pg->index, pg_prev->off);
1667                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1668                         (pg->flag & OBD_BRW_SRVLOCK));
1669                 if (short_io_size != 0 && opc == OST_WRITE) {
1670                         unsigned char *ptr = kmap_atomic(pg->pg);
1671
1672                         LASSERT(short_io_size >= requested_nob + pg->count);
1673                         memcpy(short_io_buf + requested_nob,
1674                                ptr + poff,
1675                                pg->count);
1676                         kunmap_atomic(ptr);
1677                 } else if (short_io_size == 0) {
1678                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1679                                                          pg->count);
1680                 }
1681                 requested_nob += pg->count;
1682
1683                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1684                         niobuf--;
1685                         niobuf->rnb_len += pg->count;
1686                 } else {
1687                         niobuf->rnb_offset = pg->off;
1688                         niobuf->rnb_len    = pg->count;
1689                         niobuf->rnb_flags  = pg->flag;
1690                 }
1691                 pg_prev = pg;
1692         }
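        /* The merge above means physically contiguous pages share one niobuf:
         * e.g. three full pages at offsets 0, 4096 and 8192 (assuming 4 KiB
         * pages) collapse into a single niobuf with rnb_offset = 0 and
         * rnb_len = 12288, which is why only niocount entries were reserved
         * above rather than page_count. */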
1693
1694         LASSERTF((void *)(niobuf - niocount) ==
1695                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1696                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1697                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1698
1699         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1700         if (resend) {
1701                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1702                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1703                         body->oa.o_flags = 0;
1704                 }
1705                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1706         }
1707
1708         if (osc_should_shrink_grant(cli))
1709                 osc_shrink_grant_local(cli, &body->oa);
1710
1711         if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
1712                 enable_checksum = false;
1713
1714         /* size[REQ_REC_OFF] still sizeof (*body) */
1715         if (opc == OST_WRITE) {
1716                 if (enable_checksum) {
1717                         /* store cl_cksum_type in a local variable since
1718                          * it can be changed via lprocfs */
1719                         enum cksum_types cksum_type = cli->cl_cksum_type;
1720
1721                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1722                                 body->oa.o_flags = 0;
1723
1724                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1725                                                                 cksum_type);
1726                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1727
1728                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1729                                                   requested_nob, page_count,
1730                                                   pga, OST_WRITE,
1731                                                   &body->oa.o_cksum, resend);
1732                         if (rc < 0) {
1733                                 CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
1734                                        rc);
1735                                 GOTO(out, rc);
1736                         }
1737                         CDEBUG(D_PAGE | (resend ? D_HA : 0),
1738                                "checksum at write origin: %x (%x)\n",
1739                                body->oa.o_cksum, cksum_type);
1740
1741                         /* save this in 'oa', too, for later checking */
1742                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1743                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1744                                                            cksum_type);
1745                 } else {
1746                         /* clear out the checksum flag, in case this is a
1747                          * resend but cl_checksum is no longer set. b=11238 */
1748                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1749                 }
1750                 oa->o_cksum = body->oa.o_cksum;
1751                 /* 1 RC per niobuf */
1752                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1753                                      sizeof(__u32) * niocount);
1754         } else {
1755                 if (enable_checksum) {
1756                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1757                                 body->oa.o_flags = 0;
1758                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1759                                 cli->cl_cksum_type);
1760                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1761                 }
1762
1763                 /* The client cksum has already been copied to the wire obdo
1764                  * in the earlier lustre_set_wire_obdo(), so in case a bulk
1765                  * read is being resent due to a cksum error, this allows the
1766                  * server to check+dump pages on its side */
1767         }
1768         ptlrpc_request_set_replen(req);
1769
1770         aa = ptlrpc_req_async_args(aa, req);
1771         aa->aa_oa = oa;
1772         aa->aa_requested_nob = requested_nob;
1773         aa->aa_nio_count = niocount;
1774         aa->aa_page_count = page_count;
1775         aa->aa_resends = 0;
1776         aa->aa_ppga = pga;
1777         aa->aa_cli = cli;
1778         INIT_LIST_HEAD(&aa->aa_oaps);
1779
1780         *reqp = req;
1781         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1782         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1783                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1784                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1785         RETURN(0);
1786
1787  out:
1788         ptlrpc_req_finished(req);
1789         RETURN(rc);
1790 }
1791
1792 char dbgcksum_file_name[PATH_MAX];
1793
1794 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1795                                 struct brw_page **pga, __u32 server_cksum,
1796                                 __u32 client_cksum)
1797 {
1798         struct file *filp;
1799         int rc, i;
1800         unsigned int len;
1801         char *buf;
1802
1803         /* only keep a dump of the pages for the first error on a given
1804          * file/fid range, not for subsequent resends/retries. */
1805         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1806                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1807                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1808                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1809                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1810                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1811                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1812                  pga[0]->off,
1813                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1814                  client_cksum, server_cksum);
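        /* The resulting name looks like (hypothetical values):
         * /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x1:0x0]:[0-1048575]-cafec0de-deadbeef
         */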
1815         CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1816         filp = filp_open(dbgcksum_file_name,
1817                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1818         if (IS_ERR(filp)) {
1819                 rc = PTR_ERR(filp);
1820                 if (rc == -EEXIST)
1821                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1822                                "checksum error: rc = %d\n", dbgcksum_file_name,
1823                                rc);
1824                 else
1825                         CERROR("%s: can't open to dump pages with checksum "
1826                                "error: rc = %d\n", dbgcksum_file_name, rc);
1827                 return;
1828         }
1829
1830         for (i = 0; i < page_count; i++) {
1831                 len = pga[i]->count;
1832                 buf = kmap(pga[i]->pg);
1833                 while (len != 0) {
1834                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1835                         if (rc < 0) {
1836                                 CERROR("%s: wanted to write %u but got %d "
1837                                        "error\n", dbgcksum_file_name, len, rc);
1838                                 break;
1839                         }
1840                         len -= rc;
1841                         buf += rc;
1842                 }
1843                 kunmap(pga[i]->pg);
1844         }
1845
1846         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1847         if (rc)
1848                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1849         filp_close(filp, NULL);
1850
1851         libcfs_debug_dumplog();
1852 }
1853
1854 static int
1855 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1856                      __u32 client_cksum, __u32 server_cksum,
1857                      struct osc_brw_async_args *aa)
1858 {
1859         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1860         enum cksum_types cksum_type;
1861         obd_dif_csum_fn *fn = NULL;
1862         int sector_size = 0;
1863         __u32 new_cksum;
1864         char *msg;
1865         int rc;
1866
1867         if (server_cksum == client_cksum) {
1868                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1869                 return 0;
1870         }
1871
1872         if (aa->aa_cli->cl_checksum_dump)
1873                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1874                                     server_cksum, client_cksum);
1875
1876         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1877                                            oa->o_flags : 0);
1878
1879         switch (cksum_type) {
1880         case OBD_CKSUM_T10IP512:
1881                 fn = obd_dif_ip_fn;
1882                 sector_size = 512;
1883                 break;
1884         case OBD_CKSUM_T10IP4K:
1885                 fn = obd_dif_ip_fn;
1886                 sector_size = 4096;
1887                 break;
1888         case OBD_CKSUM_T10CRC512:
1889                 fn = obd_dif_crc_fn;
1890                 sector_size = 512;
1891                 break;
1892         case OBD_CKSUM_T10CRC4K:
1893                 fn = obd_dif_crc_fn;
1894                 sector_size = 4096;
1895                 break;
1896         default:
1897                 break;
1898         }
1899
1900         if (fn)
1901                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1902                                              aa->aa_page_count, aa->aa_ppga,
1903                                              OST_WRITE, fn, sector_size,
1904                                              &new_cksum, true);
1905         else
1906                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1907                                        aa->aa_ppga, OST_WRITE, cksum_type,
1908                                        &new_cksum);
1909
1910         if (rc < 0)
1911                 msg = "failed to calculate the client write checksum";
1912         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1913                 msg = "the server did not use the checksum type specified in "
1914                       "the original request - likely a protocol problem";
1915         else if (new_cksum == server_cksum)
1916                 msg = "changed on the client after we checksummed it - "
1917                       "likely false positive due to mmap IO (bug 11742)";
1918         else if (new_cksum == client_cksum)
1919                 msg = "changed in transit before arrival at OST";
1920         else
1921                 msg = "changed in transit AND doesn't match the original - "
1922                       "likely false positive due to mmap IO (bug 11742)";
1923
1924         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1925                            DFID " object "DOSTID" extent [%llu-%llu], original "
1926                            "client csum %x (type %x), server csum %x (type %x),"
1927                            " client csum now %x\n",
1928                            obd_name, msg, libcfs_nid2str(peer->nid),
1929                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1930                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1931                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1932                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1933                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1934                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1935                            client_cksum,
1936                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1937                            server_cksum, cksum_type, new_cksum);
1938         return 1;
1939 }
1940
1941 /* Note rc enters this function as the number of bytes transferred */
1942 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1943 {
1944         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1945         struct client_obd *cli = aa->aa_cli;
1946         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1947         const struct lnet_process_id *peer =
1948                 &req->rq_import->imp_connection->c_peer;
1949         struct ost_body *body;
1950         u32 client_cksum = 0;
1951         struct inode *inode;
1952         unsigned int blockbits = 0, blocksize = 0;
1953
1954         ENTRY;
1955
1956         if (rc < 0 && rc != -EDQUOT) {
1957                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1958                 RETURN(rc);
1959         }
1960
1961         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1962         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1963         if (body == NULL) {
1964                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1965                 RETURN(-EPROTO);
1966         }
1967
1968         /* set/clear over quota flag for a uid/gid/projid */
1969         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1970             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1971                 unsigned qid[LL_MAXQUOTAS] = {
1972                                          body->oa.o_uid, body->oa.o_gid,
1973                                          body->oa.o_projid };
1974                 CDEBUG(D_QUOTA,
1975                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1976                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1977                        body->oa.o_valid, body->oa.o_flags);
1978                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1979                                 body->oa.o_flags);
1980         }
1981
1982         osc_update_grant(cli, body);
1983
1984         if (rc < 0)
1985                 RETURN(rc);
1986
1987         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1988                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1989
1990         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1991                 if (rc > 0) {
1992                         CERROR("%s: unexpected positive size %d\n",
1993                                obd_name, rc);
1994                         RETURN(-EPROTO);
1995                 }
1996
1997                 if (req->rq_bulk != NULL &&
1998                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1999                         RETURN(-EAGAIN);
2000
2001                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
2002                     check_write_checksum(&body->oa, peer, client_cksum,
2003                                          body->oa.o_cksum, aa))
2004                         RETURN(-EAGAIN);
2005
2006                 rc = check_write_rcs(req, aa->aa_requested_nob,
2007                                      aa->aa_nio_count, aa->aa_page_count,
2008                                      aa->aa_ppga);
2009                 GOTO(out, rc);
2010         }
2011
2012         /* The rest of this function executes only for OST_READs */
2013
2014         if (req->rq_bulk == NULL) {
2015                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
2016                                           RCL_SERVER);
2017                 LASSERT(rc == req->rq_status);
2018         } else {
2019                 /* if unwrap_bulk failed, return -EAGAIN to retry */
2020                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
2021         }
2022         if (rc < 0)
2023                 GOTO(out, rc = -EAGAIN);
2024
2025         if (rc > aa->aa_requested_nob) {
2026                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
2027                        rc, aa->aa_requested_nob);
2028                 RETURN(-EPROTO);
2029         }
2030
2031         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2032                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2033                        rc, req->rq_bulk->bd_nob_transferred);
2034                 RETURN(-EPROTO);
2035         }
2036
2037         if (req->rq_bulk == NULL) {
2038                 /* short io */
2039                 int nob, pg_count, i = 0;
2040                 unsigned char *buf;
2041
2042                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2043                 pg_count = aa->aa_page_count;
2044                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2045                                                    rc);
2046                 nob = rc;
2047                 while (nob > 0 && pg_count > 0) {
2048                         unsigned char *ptr;
2049                         int count = aa->aa_ppga[i]->count > nob ?
2050                                     nob : aa->aa_ppga[i]->count;
2051
2052                         CDEBUG(D_CACHE, "page %p count %d\n",
2053                                aa->aa_ppga[i]->pg, count);
2054                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2055                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2056                                count);
2057                         kunmap_atomic((void *) ptr);
2058
2059                         buf += count;
2060                         nob -= count;
2061                         i++;
2062                         pg_count--;
2063                 }
2064         }
2065
2066         if (rc < aa->aa_requested_nob)
2067                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2068
2069         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2070                 static int cksum_counter;
2071                 u32 server_cksum = body->oa.o_cksum;
2072                 int nob = rc;
2073                 char *via = "";
2074                 char *router = "";
2075                 enum cksum_types cksum_type;
2076                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2077                         body->oa.o_flags : 0;
2078
2079                 cksum_type = obd_cksum_type_unpack(o_flags);
2080                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2081                                           aa->aa_page_count, aa->aa_ppga,
2082                                           OST_READ, &client_cksum, false);
2083                 if (rc < 0)
2084                         GOTO(out, rc);
2085
2086                 if (req->rq_bulk != NULL &&
2087                     peer->nid != req->rq_bulk->bd_sender) {
2088                         via = " via ";
2089                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2090                 }
2091
2092                 if (server_cksum != client_cksum) {
2093                         struct ost_body *clbody;
2094                         __u32 client_cksum2;
2095                         u32 page_count = aa->aa_page_count;
2096
2097                         osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2098                                              page_count, aa->aa_ppga,
2099                                              OST_READ, &client_cksum2, true);
2100                         clbody = req_capsule_client_get(&req->rq_pill,
2101                                                         &RMF_OST_BODY);
2102                         if (cli->cl_checksum_dump)
2103                                 dump_all_bulk_pages(&clbody->oa, page_count,
2104                                                     aa->aa_ppga, server_cksum,
2105                                                     client_cksum);
2106
2107                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2108                                            "%s%s%s inode "DFID" object "DOSTID
2109                                            " extent [%llu-%llu], client %x/%x, "
2110                                            "server %x, cksum_type %x\n",
2111                                            obd_name,
2112                                            libcfs_nid2str(peer->nid),
2113                                            via, router,
2114                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2115                                                 clbody->oa.o_parent_seq : 0ULL,
2116                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2117                                                 clbody->oa.o_parent_oid : 0,
2118                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2119                                                 clbody->oa.o_parent_ver : 0,
2120                                            POSTID(&body->oa.o_oi),
2121                                            aa->aa_ppga[0]->off,
2122                                            aa->aa_ppga[page_count-1]->off +
2123                                            aa->aa_ppga[page_count-1]->count - 1,
2124                                            client_cksum, client_cksum2,
2125                                            server_cksum, cksum_type);
2126                         cksum_counter = 0;
2127                         aa->aa_oa->o_cksum = client_cksum;
2128                         rc = -EAGAIN;
2129                 } else {
2130                         cksum_counter++;
2131                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2132                         rc = 0;
2133                 }
2134         } else if (unlikely(client_cksum)) {
2135                 static int cksum_missed;
2136
2137                 cksum_missed++;
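                /* for positive x, (x & -x) == x only when x is a power of
                 * two, so this error is logged with exponential backoff:
                 * on the 1st, 2nd, 4th, 8th, ... missed checksum. */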
2138                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2139                         CERROR("%s: checksum %u requested from %s but not sent\n",
2140                                obd_name, cksum_missed,
2141                                libcfs_nid2str(peer->nid));
2142         } else {
2143                 rc = 0;
2144         }
2145
2146         inode = page2inode(aa->aa_ppga[0]->pg);
2147         if (inode == NULL) {
2148                 /* Try to get reference to inode from cl_page if we are
2149                  * dealing with direct IO, as handled pages are not
2150                  * actual page cache pages.
2151                  */
2152                 struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
2153
2154                 inode = oap2cl_page(oap)->cp_inode;
2155                 if (inode) {
2156                         blockbits = inode->i_blkbits;
2157                         blocksize = 1 << blockbits;
2158                 }
2159         }
2160         if (inode && IS_ENCRYPTED(inode)) {
2161                 int idx;
2162
2163                 if (!llcrypt_has_encryption_key(inode)) {
2164                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2165                         GOTO(out, rc);
2166                 }
2167                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2168                         struct brw_page *pg = aa->aa_ppga[idx];
2169                         unsigned int offs = 0;
2170
2171                         while (offs < PAGE_SIZE) {
2172                                 /* do not decrypt if page is all 0s */
2173                                 if (memchr_inv(page_address(pg->pg) + offs, 0,
2174                                          LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2175                                         /* if the page is empty, forward
2176                                          * this info to upper layers
2177                                          * (ll_io_zero_page) by clearing
2178                                          * PagePrivate2 */
2179                                         if (!offs)
2180                                                 ClearPagePrivate2(pg->pg);
2181                                         break;
2182                                 }
2183
2184                                 if (blockbits) {
2185                                         /* This is the direct IO case.
2186                                          * Directly call the decrypt
2187                                          * function that takes the inode
2188                                          * as an input parameter. The page
2189                                          * does not need to be locked. */
2190                                         u64 lblk_num =
2191                                                 ((u64)(pg->off >> PAGE_SHIFT) <<
2192                                                      (PAGE_SHIFT - blockbits)) +
2193                                                        (offs >> blockbits);
2194                                         unsigned int i;
2195
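                                        /* e.g. with 4 KiB pages and
                                         * blockbits = 12, lblk_num is just
                                         * the page index plus offs / 4096,
                                         * i.e. one 4 KiB block per
                                         * encryption unit. */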
2196                                         for (i = offs;
2197                                              i < offs +
2198                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2199                                              i += blocksize, lblk_num++) {
2200                                                 rc =
2201                                                   llcrypt_decrypt_block_inplace(
2202                                                           inode, pg->pg,
2203                                                           blocksize, i,
2204                                                           lblk_num);
2205                                                 if (rc)
2206                                                         break;
2207                                         }
2208                                 } else {
2209                                         rc = llcrypt_decrypt_pagecache_blocks(
2210                                                 pg->pg,
2211                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2212                                                 offs);
2213                                 }
2214                                 if (rc)
2215                                         GOTO(out, rc);
2216
2217                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2218                         }
2219                 }
2220         }
2221
2222 out:
2223         if (rc >= 0)
2224                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2225                                      aa->aa_oa, &body->oa);
2226
2227         RETURN(rc);
2228 }
2229
2230 static int osc_brw_redo_request(struct ptlrpc_request *request,
2231                                 struct osc_brw_async_args *aa, int rc)
2232 {
2233         struct ptlrpc_request *new_req;
2234         struct osc_brw_async_args *new_aa;
2235         struct osc_async_page *oap;
2236         ENTRY;
2237
2238         /* The message below is checked in replay-ost-single.sh test_8ae */
2239         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2240                   "redo for recoverable error %d", rc);
2241
2242         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2243                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2244                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2245                                   aa->aa_ppga, &new_req, 1);
2246         if (rc)
2247                 RETURN(rc);
2248
2249         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2250                 if (oap->oap_request != NULL) {
2251                         LASSERTF(request == oap->oap_request,
2252                                  "request %p != oap_request %p\n",
2253                                  request, oap->oap_request);
2254                 }
2255         }
2256         /*
2257          * New request takes over pga and oaps from old request.
2258          * Note that copying a list_head doesn't work, need to move it...
2259          */
2260         aa->aa_resends++;
2261         new_req->rq_interpret_reply = request->rq_interpret_reply;
2262         new_req->rq_async_args = request->rq_async_args;
2263         new_req->rq_commit_cb = request->rq_commit_cb;
2264         /* cap the resend delay to the current request timeout; this is
2265          * similar to what ptlrpc does (see after_reply()) */
2266         if (aa->aa_resends > new_req->rq_timeout)
2267                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2268         else
2269                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
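        /* e.g. the 3rd resend of a request with a 30s timeout is delayed by
         * 3 seconds; from the 30th resend on, by the full 30 seconds. */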
2270         new_req->rq_generation_set = 1;
2271         new_req->rq_import_generation = request->rq_import_generation;
2272
2273         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2274
2275         INIT_LIST_HEAD(&new_aa->aa_oaps);
2276         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2277         INIT_LIST_HEAD(&new_aa->aa_exts);
2278         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2279         new_aa->aa_resends = aa->aa_resends;
2280
2281         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2282                 if (oap->oap_request) {
2283                         ptlrpc_req_finished(oap->oap_request);
2284                         oap->oap_request = ptlrpc_request_addref(new_req);
2285                 }
2286         }
2287
2288         /* XXX: This code will run into problems if we ever support adding
2289          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2290          * waiting for all of them to finish. We should inherit the request
2291          * set from the old request. */
2292         ptlrpcd_add_req(new_req);
2293
2294         DEBUG_REQ(D_INFO, new_req, "new request");
2295         RETURN(0);
2296 }
2297
2298 /*
2299  * Ugh, we want disk allocation on the target to happen in offset order.  We'll
2300  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2301  * fine for our small page arrays and doesn't require allocation.  It's an
2302  * insertion sort that swaps elements that are strides apart, shrinking the
2303  * stride down until it's '1' and the array is sorted.
2304  */
2305 static void sort_brw_pages(struct brw_page **array, int num)
2306 {
2307         int stride, i, j;
2308         struct brw_page *tmp;
2309
2310         if (num == 1)
2311                 return;
2312         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2313                 ;
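        /* e.g. for num = 100 the loop above yields 1, 4, 13, 40, 121 and
         * stops with stride = 121; the sort below then passes over the
         * array with strides 40, 13, 4 and finally 1 (the 3h+1 sequence). */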
2314
2315         do {
2316                 stride /= 3;
2317                 for (i = stride ; i < num ; i++) {
2318                         tmp = array[i];
2319                         j = i;
2320                         while (j >= stride && array[j - stride]->off > tmp->off) {
2321                                 array[j] = array[j - stride];
2322                                 j -= stride;
2323                         }
2324                         array[j] = tmp;
2325                 }
2326         } while (stride > 1);
2327 }
2328
2329 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2330 {
2331         LASSERT(ppga != NULL);
2332         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2333 }
2334
2335 static int brw_interpret(const struct lu_env *env,
2336                          struct ptlrpc_request *req, void *args, int rc)
2337 {
2338         struct osc_brw_async_args *aa = args;
2339         struct osc_extent *ext;
2340         struct osc_extent *tmp;
2341         struct client_obd *cli = aa->aa_cli;
2342         unsigned long transferred = 0;
2343
2344         ENTRY;
2345
2346         rc = osc_brw_fini_request(req, rc);
2347         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2348
2349         /* restore clear text pages */
2350         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2351
2352         /*
2353          * When server returns -EINPROGRESS, client should always retry
2354          * regardless of the number of times the bulk was resent already.
2355          */
2356         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2357                 if (req->rq_import_generation !=
2358                     req->rq_import->imp_generation) {
2359                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2360                                ""DOSTID", rc = %d.\n",
2361                                req->rq_import->imp_obd->obd_name,
2362                                POSTID(&aa->aa_oa->o_oi), rc);
2363                 } else if (rc == -EINPROGRESS ||
2364                            client_should_resend(aa->aa_resends, aa->aa_cli)) {
2365                         rc = osc_brw_redo_request(req, aa, rc);
2366                 } else {
2367                         CERROR("%s: too many resent retries for object: "
2368                                "%llu:%llu, rc = %d.\n",
2369                                req->rq_import->imp_obd->obd_name,
2370                                POSTID(&aa->aa_oa->o_oi), rc);
2371                 }
2372
2373                 if (rc == 0)
2374                         RETURN(0);
2375                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2376                         rc = -EIO;
2377         }
2378
2379         if (rc == 0) {
2380                 struct obdo *oa = aa->aa_oa;
2381                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2382                 unsigned long valid = 0;
2383                 struct cl_object *obj;
2384                 struct osc_async_page *last;
2385
2386                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2387                 obj = osc2cl(last->oap_obj);
2388
2389                 cl_object_attr_lock(obj);
2390                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2391                         attr->cat_blocks = oa->o_blocks;
2392                         valid |= CAT_BLOCKS;
2393                 }
2394                 if (oa->o_valid & OBD_MD_FLMTIME) {
2395                         attr->cat_mtime = oa->o_mtime;
2396                         valid |= CAT_MTIME;
2397                 }
2398                 if (oa->o_valid & OBD_MD_FLATIME) {
2399                         attr->cat_atime = oa->o_atime;
2400                         valid |= CAT_ATIME;
2401                 }
2402                 if (oa->o_valid & OBD_MD_FLCTIME) {
2403                         attr->cat_ctime = oa->o_ctime;
2404                         valid |= CAT_CTIME;
2405                 }
2406
2407                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2408                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2409                         loff_t last_off = last->oap_count + last->oap_obj_off +
2410                                 last->oap_page_off;
2411
2412                         /* Change file size if this is an out of quota or
2413                          * direct IO write and it extends the file size */
2414                         if (loi->loi_lvb.lvb_size < last_off) {
2415                                 attr->cat_size = last_off;
2416                                 valid |= CAT_SIZE;
2417                         }
2418                         /* Extend KMS if it's not a lockless write */
2419                         if (loi->loi_kms < last_off &&
2420                             oap2osc_page(last)->ops_srvlock == 0) {
2421                                 attr->cat_kms = last_off;
2422                                 valid |= CAT_KMS;
2423                         }
2424                 }
2425
2426                 if (valid != 0)
2427                         cl_object_attr_update(env, obj, attr, valid);
2428                 cl_object_attr_unlock(obj);
2429         }
2430         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2431         aa->aa_oa = NULL;
2432
2433         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2434                 osc_inc_unstable_pages(req);
2435
2436         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2437                 list_del_init(&ext->oe_link);
2438                 osc_extent_finish(env, ext, 1,
2439                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2440         }
2441         LASSERT(list_empty(&aa->aa_exts));
2442         LASSERT(list_empty(&aa->aa_oaps));
2443
2444         transferred = (req->rq_bulk == NULL ? /* short io */
2445                        aa->aa_requested_nob :
2446                        req->rq_bulk->bd_nob_transferred);
2447
2448         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2449         ptlrpc_lprocfs_brw(req, transferred);
2450
2451         spin_lock(&cli->cl_loi_list_lock);
2452         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2453          * is called so we know whether to go to sync BRWs or wait for more
2454          * RPCs to complete */
2455         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2456                 cli->cl_w_in_flight--;
2457         else
2458                 cli->cl_r_in_flight--;
2459         osc_wake_cache_waiters(cli);
2460         spin_unlock(&cli->cl_loi_list_lock);
2461
2462         osc_io_unplug(env, cli, NULL);
2463         RETURN(rc);
2464 }
2465
2466 static void brw_commit(struct ptlrpc_request *req)
2467 {
2468         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2469          * this callback (invoked via rq_commit_cb), we need to ensure
2470          * osc_dec_unstable_pages is still called. Otherwise unstable
2471          * pages may be leaked. */
2472         spin_lock(&req->rq_lock);
2473         if (likely(req->rq_unstable)) {
2474                 req->rq_unstable = 0;
2475                 spin_unlock(&req->rq_lock);
2476
2477                 osc_dec_unstable_pages(req);
2478         } else {
2479                 req->rq_committed = 1;
2480                 spin_unlock(&req->rq_lock);
2481         }
2482 }
2483
2484 /**
2485  * Build an RPC from the list of extents @ext_list. The caller must ensure
2486  * that the total number of pages in this list does NOT exceed the max pages
2487  * per RPC. Extents in the list must be in OES_RPC state.
2488  */
2489 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2490                   struct list_head *ext_list, int cmd)
2491 {
2492         struct ptlrpc_request           *req = NULL;
2493         struct osc_extent               *ext;
2494         struct brw_page                 **pga = NULL;
2495         struct osc_brw_async_args       *aa = NULL;
2496         struct obdo                     *oa = NULL;
2497         struct osc_async_page           *oap;
2498         struct osc_object               *obj = NULL;
2499         struct cl_req_attr              *crattr = NULL;
2500         loff_t                          starting_offset = OBD_OBJECT_EOF;
2501         loff_t                          ending_offset = 0;
2502         /* '1' for consistency with code that checks !mpflag to restore */
2503         int mpflag = 1;
2504         int                             mem_tight = 0;
2505         int                             page_count = 0;
2506         bool                            soft_sync = false;
2507         bool                            ndelay = false;
2508         int                             i;
2509         int                             grant = 0;
2510         int                             rc;
2511         __u32                           layout_version = 0;
2512         LIST_HEAD(rpc_list);
2513         struct ost_body                 *body;
2514         ENTRY;
2515         LASSERT(!list_empty(ext_list));
2516
2517         /* add pages into rpc_list to build BRW rpc */
2518         list_for_each_entry(ext, ext_list, oe_link) {
2519                 LASSERT(ext->oe_state == OES_RPC);
2520                 mem_tight |= ext->oe_memalloc;
2521                 grant += ext->oe_grants;
2522                 page_count += ext->oe_nr_pages;
2523                 layout_version = max(layout_version, ext->oe_layout_version);
2524                 if (obj == NULL)
2525                         obj = ext->oe_obj;
2526         }
2527
2528         soft_sync = osc_over_unstable_soft_limit(cli);
2529         if (mem_tight)
2530                 mpflag = memalloc_noreclaim_save();
2531
2532         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2533         if (pga == NULL)
2534                 GOTO(out, rc = -ENOMEM);
2535
2536         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2537         if (oa == NULL)
2538                 GOTO(out, rc = -ENOMEM);
2539
2540         i = 0;
2541         list_for_each_entry(ext, ext_list, oe_link) {
2542                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2543                         if (mem_tight)
2544                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2545                         if (soft_sync)
2546                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2547                         pga[i] = &oap->oap_brw_page;
2548                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2549                         i++;
2550
2551                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2552                         if (starting_offset == OBD_OBJECT_EOF ||
2553                             starting_offset > oap->oap_obj_off)
2554                                 starting_offset = oap->oap_obj_off;
2555                         else
2556                                 LASSERT(oap->oap_page_off == 0);
2557                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2558                                 ending_offset = oap->oap_obj_off +
2559                                                 oap->oap_count;
2560                         else
2561                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2562                                         PAGE_SIZE);
2563                 }
2564                 if (ext->oe_ndelay)
2565                         ndelay = true;
2566         }
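        /* The asserts above enforce the RPC layout invariant: only the first
         * and last pages of the transfer may be partial; every interior page
         * must cover a full PAGE_SIZE. */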
2567
2568         /* first page in the list */
2569         oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2570
2571         crattr = &osc_env_info(env)->oti_req_attr;
2572         memset(crattr, 0, sizeof(*crattr));
2573         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2574         crattr->cra_flags = ~0ULL;
2575         crattr->cra_page = oap2cl_page(oap);
2576         crattr->cra_oa = oa;
2577         cl_req_attr_set(env, osc2cl(obj), crattr);
2578
2579         if (cmd == OBD_BRW_WRITE) {
2580                 oa->o_grant_used = grant;
2581                 if (layout_version > 0) {
2582                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2583                                PFID(&oa->o_oi.oi_fid), layout_version);
2584
2585                         oa->o_layout_version = layout_version;
2586                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2587                 }
2588         }
2589
2590         sort_brw_pages(pga, page_count);
2591         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2592         if (rc != 0) {
2593                 CERROR("prep_req failed: %d\n", rc);
2594                 GOTO(out, rc);
2595         }
2596
2597         req->rq_commit_cb = brw_commit;
2598         req->rq_interpret_reply = brw_interpret;
2599         req->rq_memalloc = mem_tight != 0;
2600         oap->oap_request = ptlrpc_request_addref(req);
2601         if (ndelay) {
2602                 req->rq_no_resend = req->rq_no_delay = 1;
2603                 /* probably should set a shorter timeout value here,
2604                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2605                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2606         }
2607
2608         /* Need to update the timestamps after the request is built in case
2609          * we race with setattr (locally or in queue at the OST).  If the OST
2610          * gets a later setattr before an earlier BRW (as determined by the
2611          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2612          * is no obvious way to do this in a single call.  bug 10150 */
2613         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2614         crattr->cra_oa = &body->oa;
2615         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2616         cl_req_attr_set(env, osc2cl(obj), crattr);
2617         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2618
2619         aa = ptlrpc_req_async_args(aa, req);
2620         INIT_LIST_HEAD(&aa->aa_oaps);
2621         list_splice_init(&rpc_list, &aa->aa_oaps);
2622         INIT_LIST_HEAD(&aa->aa_exts);
2623         list_splice_init(ext_list, &aa->aa_exts);
2624
2625         spin_lock(&cli->cl_loi_list_lock);
2626         starting_offset >>= PAGE_SHIFT;
2627         if (cmd == OBD_BRW_READ) {
2628                 cli->cl_r_in_flight++;
2629                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2630                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2631                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2632                                       starting_offset + 1);
2633         } else {
2634                 cli->cl_w_in_flight++;
2635                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2636                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2637                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2638                                       starting_offset + 1);
2639         }
2640         spin_unlock(&cli->cl_loi_list_lock);
2641
2642         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2643                   page_count, aa, cli->cl_r_in_flight,
2644                   cli->cl_w_in_flight);
2645         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2646
2647         ptlrpcd_add_req(req);
2648         rc = 0;
2649         EXIT;
2650
2651 out:
2652         if (mem_tight)
2653                 memalloc_noreclaim_restore(mpflag);
2654
2655         if (rc != 0) {
2656                 LASSERT(req == NULL);
2657
2658                 if (oa)
2659                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2660                 if (pga) {
2661                         osc_release_bounce_pages(pga, page_count);
2662                         osc_release_ppga(pga, page_count);
2663                 }
2664                 /* This should happen rarely and is pretty bad; it makes the
2665                  * pending list not follow the dirty order.
2666                  */
2667                 while ((ext = list_first_entry_or_null(ext_list,
2668                                                        struct osc_extent,
2669                                                        oe_link)) != NULL) {
2670                         list_del_init(&ext->oe_link);
2671                         osc_extent_finish(env, ext, 0, rc);
2672                 }
2673         }
2674         RETURN(rc);
2675 }
2676
2677 /* This is to refresh our lock in the face of no RPCs. */
2678 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2679 {
2680         struct ptlrpc_request *req;
2681         struct obdo oa;
2682         struct brw_page bpg = { .off = start, .count = 1};
2683         struct brw_page *pga = &bpg;
2684         int rc;
2685
2686         memset(&oa, 0, sizeof(oa));
2687         oa.o_oi = osc->oo_oinfo->loi_oi;
2688         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2689         /* For updated servers - don't do a read */
2690         oa.o_flags = OBD_FL_NORPC;
2691
2692         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2693                                   &req, 0);
2694
2695         /* If we succeeded, we ship it off; if not, there's no point in
2696          * doing anything. Also no resends.
2697          * No interpret callback, no commit callback.
2698          */
2699         if (!rc) {
2700                 req->rq_no_resend = 1;
2701                 ptlrpcd_add_req(req);
2702         }
2703 }
2704
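/*
 * Bind client object data to a DLM lock, or verify an existing binding:
 * if l_ast_data is unset it is pointed at @data.  Returns 1 when
 * l_ast_data ends up equal to @data (set here or already matching),
 * 0 when the lock is already bound to some other object.
 */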
2705 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2706 {
2707         int set = 0;
2708
2709         LASSERT(lock != NULL);
2710
2711         lock_res_and_lock(lock);
2712
2713         if (lock->l_ast_data == NULL)
2714                 lock->l_ast_data = data;
2715         if (lock->l_ast_data == data)
2716                 set = 1;
2717
2718         unlock_res_and_lock(lock);
2719
2720         return set;
2721 }
2722
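/*
 * Finish an OSC enqueue: translate the intent/ELDLM result into an error
 * code, mark the LVB ready when appropriate, invoke the caller's upcall,
 * and drop the reference on the lock handle taken in ldlm_cli_enqueue().
 */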
2723 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2724                      void *cookie, struct lustre_handle *lockh,
2725                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2726                      int errcode)
2727 {
2728         bool intent = *flags & LDLM_FL_HAS_INTENT;
2729         int rc;
2730         ENTRY;
2731
2732         /* The request was created before the ldlm_cli_enqueue() call. */
2733         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2734                 struct ldlm_reply *rep;
2735
2736                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2737                 LASSERT(rep != NULL);
2738
2739                 rep->lock_policy_res1 =
2740                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2741                 if (rep->lock_policy_res1)
2742                         errcode = rep->lock_policy_res1;
2743                 if (!speculative)
2744                         *flags |= LDLM_FL_LVB_READY;
2745         } else if (errcode == ELDLM_OK) {
2746                 *flags |= LDLM_FL_LVB_READY;
2747         }
2748
2749         /* Call the update callback. */
2750         rc = (*upcall)(cookie, lockh, errcode);
2751
2752         /* release the reference taken in ldlm_cli_enqueue() */
2753         if (errcode == ELDLM_LOCK_MATCHED)
2754                 errcode = ELDLM_OK;
2755         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2756                 ldlm_lock_decref(lockh, mode);
2757
2758         RETURN(rc);
2759 }
2760
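/*
 * ptlrpc interpret callback for an asynchronous enqueue: completes the
 * LDLM side via ldlm_cli_enqueue_fini() and then the OSC side via
 * osc_enqueue_fini(), holding an extra lock reference across the upcall.
 */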
2761 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2762                           void *args, int rc)
2763 {
2764         struct osc_enqueue_args *aa = args;
2765         struct ldlm_lock *lock;
2766         struct lustre_handle *lockh = &aa->oa_lockh;
2767         enum ldlm_mode mode = aa->oa_mode;
2768         struct ost_lvb *lvb = aa->oa_lvb;
2769         __u32 lvb_len = sizeof(*lvb);
2770         __u64 flags = 0;
2771         struct ldlm_enqueue_info einfo = {
2772                 .ei_type = aa->oa_type,
2773                 .ei_mode = mode,
2774         };
2775
2776         ENTRY;
2777
2778         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2779          * be valid. */
2780         lock = ldlm_handle2lock(lockh);
2781         LASSERTF(lock != NULL,
2782                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2783                  lockh->cookie, req, aa);
2784
2785         /* Take an additional reference so that a blocking AST that
2786          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2787          * to arrive after an upcall has been executed by
2788          * osc_enqueue_fini(). */
2789         ldlm_lock_addref(lockh, mode);
2790
2791         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2792         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2793
2794         /* Let the CP AST grant the lock first. */
2795         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2796
2797         if (aa->oa_speculative) {
2798                 LASSERT(aa->oa_lvb == NULL);
2799                 LASSERT(aa->oa_flags == NULL);
2800                 aa->oa_flags = &flags;
2801         }
2802
2803         /* Complete obtaining the lock procedure. */
2804         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2805                                    lvb, lvb_len, lockh, rc);
2806         /* Complete osc stuff. */
2807         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2808                               aa->oa_flags, aa->oa_speculative, rc);
2809
2810         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2811
2812         ldlm_lock_decref(lockh, mode);
2813         LDLM_LOCK_PUT(lock);
2814         RETURN(rc);
2815 }
2816
2817 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2818  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2819  * other synchronous requests; however, keeping some locks while trying to
2820  * obtain others may take a considerable amount of time in the case of OST
2821  * failure, and when other sync requests cannot get a lock released by a
2822  * client, that client is evicted from the cluster -- such scenarios make life
2823  * difficult, so release locks just after they are obtained. */
2824 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2825                      __u64 *flags, union ldlm_policy_data *policy,
2826                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2827                      void *cookie, struct ldlm_enqueue_info *einfo,
2828                      struct ptlrpc_request_set *rqset, int async,
2829                      bool speculative)
2830 {
2831         struct obd_device *obd = exp->exp_obd;
2832         struct lustre_handle lockh = { 0 };
2833         struct ptlrpc_request *req = NULL;
2834         int intent = *flags & LDLM_FL_HAS_INTENT;
2835         __u64 match_flags = *flags;
2836         enum ldlm_mode mode;
2837         int rc;
2838         ENTRY;
2839
2840         /* Filesystem lock extents are extended to page boundaries so that
2841          * dealing with the page cache is a little smoother.  */
2842         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2843         policy->l_extent.end |= ~PAGE_MASK;
2844
2845         /* Next, search for already existing extent locks that will cover us */
2846         /* If we're trying to read, we also search for an existing PW lock.  The
2847          * VFS and page cache already protect us locally, so lots of readers/
2848          * writers can share a single PW lock.
2849          *
2850          * There are problems with conversion deadlocks, so instead of
2851          * converting a read lock to a write lock, we'll just enqueue a new
2852          * one.
2853          *
2854          * At some point we should cancel the read lock instead of making the
2855          * server send us a blocking callback, but there are problems with
2856          * canceling locks out from under other users right now, too. */
2857         mode = einfo->ei_mode;
2858         if (einfo->ei_mode == LCK_PR)
2859                 mode |= LCK_PW;
2860         /* Normal lock requests must wait for the LVB to be ready before
2861          * matching a lock; speculative lock requests do not need to,
2862          * because they will not actually use the lock. */
2863         if (!speculative)
2864                 match_flags |= LDLM_FL_LVB_READY;
2865         if (intent != 0)
2866                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2867         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2868                                einfo->ei_type, policy, mode, &lockh);
2869         if (mode) {
2870                 struct ldlm_lock *matched;
2871
2872                 if (*flags & LDLM_FL_TEST_LOCK)
2873                         RETURN(ELDLM_OK);
2874
2875                 matched = ldlm_handle2lock(&lockh);
2876                 if (speculative) {
2877                         /* This DLM lock request is speculative, and does not
2878                          * have an associated IO request. Therefore if there
2879                          * is already a DLM lock, it will just inform the
2880                          * caller to cancel the request for this stripe. */
2881                         lock_res_and_lock(matched);
2882                         if (ldlm_extent_equal(&policy->l_extent,
2883                             &matched->l_policy_data.l_extent))
2884                                 rc = -EEXIST;
2885                         else
2886                                 rc = -ECANCELED;
2887                         unlock_res_and_lock(matched);
2888
2889                         ldlm_lock_decref(&lockh, mode);
2890                         LDLM_LOCK_PUT(matched);
2891                         RETURN(rc);
2892                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2893                         *flags |= LDLM_FL_LVB_READY;
2894
2895                         /* We already have a lock, and it's referenced. */
2896                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2897
2898                         ldlm_lock_decref(&lockh, mode);
2899                         LDLM_LOCK_PUT(matched);
2900                         RETURN(ELDLM_OK);
2901                 } else {
2902                         ldlm_lock_decref(&lockh, mode);
2903                         LDLM_LOCK_PUT(matched);
2904                 }
2905         }
2906
2907         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2908                 RETURN(-ENOLCK);
2909
2910         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2911         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2912
2913         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2914                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2915         if (async) {
2916                 if (!rc) {
2917                         struct osc_enqueue_args *aa;
2918                         aa = ptlrpc_req_async_args(aa, req);
2919                         aa->oa_exp         = exp;
2920                         aa->oa_mode        = einfo->ei_mode;
2921                         aa->oa_type        = einfo->ei_type;
2922                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2923                         aa->oa_upcall      = upcall;
2924                         aa->oa_cookie      = cookie;
2925                         aa->oa_speculative = speculative;
2926                         if (!speculative) {
2927                                 aa->oa_flags  = flags;
2928                                 aa->oa_lvb    = lvb;
2929                         } else {
2930                                 /* speculative locks essentially enqueue a
2931                                  * DLM lock in advance, so we don't care
2932                                  * about the result of the enqueue. */
2933                                 aa->oa_lvb    = NULL;
2934                                 aa->oa_flags  = NULL;
2935                         }
2936
2937                         req->rq_interpret_reply = osc_enqueue_interpret;
2938                         ptlrpc_set_add_req(rqset, req);
2939                 }
2940                 RETURN(rc);
2941         }
2942
2943         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2944                               flags, speculative, rc);
2945
2946         RETURN(rc);
2947 }
2948
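/*
 * Look for an already granted lock covering the extent in @policy,
 * optionally binding @obj to the matched lock and refreshing its cached
 * LVB.  A minimal calling sketch (illustrative only; the extent values,
 * flags, and surrounding variables are hypothetical):
 *
 *	union ldlm_policy_data policy = {
 *		.l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
 *	};
 *	__u64 flags = LDLM_FL_TEST_LOCK;
 *	struct lustre_handle lockh;
 *
 *	mode = osc_match_base(env, exp, resname, LDLM_EXTENT, &policy,
 *			      LCK_PR, &flags, NULL, &lockh, 0);
 *
 * Returns the matched mode, or 0 if no suitable lock was found.
 */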
2949 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2950                    struct ldlm_res_id *res_id, enum ldlm_type type,
2951                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2952                    __u64 *flags, struct osc_object *obj,
2953                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2954 {
2955         struct obd_device *obd = exp->exp_obd;
2956         __u64 lflags = *flags;
2957         enum ldlm_mode rc;
2958         ENTRY;
2959
2960         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2961                 RETURN(-EIO);
2962
2963         /* Filesystem lock extents are extended to page boundaries so that
2964          * dealing with the page cache is a little smoother */
2965         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2966         policy->l_extent.end |= ~PAGE_MASK;
2967
2968         /* Next, search for already existing extent locks that will cover us */
2969         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2970                                         res_id, type, policy, mode, lockh,
2971                                         match_flags);
2972         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2973                 RETURN(rc);
2974
2975         if (obj != NULL) {
2976                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2977
2978                 LASSERT(lock != NULL);
2979                 if (osc_set_lock_data(lock, obj)) {
2980                         lock_res_and_lock(lock);
2981                         if (!ldlm_is_lvb_cached(lock)) {
2982                                 LASSERT(lock->l_ast_data == obj);
2983                                 osc_lock_lvb_update(env, obj, lock, NULL);
2984                                 ldlm_set_lvb_cached(lock);
2985                         }
2986                         unlock_res_and_lock(lock);
2987                 } else {
2988                         ldlm_lock_decref(lockh, rc);
2989                         rc = 0;
2990                 }
2991                 LDLM_LOCK_PUT(lock);
2992         }
2993         RETURN(rc);
2994 }
2995
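/*
 * Interpret callback for an async OST_STATFS request: copy the server's
 * obd_statfs reply into the caller's buffer and run the oi_cb_up upcall.
 * -ENOTCONN/-EAGAIN are swallowed for OBD_STATFS_NODELAY requests.
 */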
2996 static int osc_statfs_interpret(const struct lu_env *env,
2997                                 struct ptlrpc_request *req, void *args, int rc)
2998 {
2999         struct osc_async_args *aa = args;
3000         struct obd_statfs *msfs;
3001
3002         ENTRY;
3003         if (rc == -EBADR)
3004                 /*
3005                  * The request has in fact never been sent due to issues at
3006                  * a higher level (LOV).  Exit immediately since the caller
3007                  * is aware of the problem and takes care of the clean up.
3008                  */
3009                 RETURN(rc);
3010
3011         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3012             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3013                 GOTO(out, rc = 0);
3014
3015         if (rc != 0)
3016                 GOTO(out, rc);
3017
3018         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3019         if (msfs == NULL)
3020                 GOTO(out, rc = -EPROTO);
3021
3022         *aa->aa_oi->oi_osfs = *msfs;
3023 out:
3024         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3025
3026         RETURN(rc);
3027 }
3028
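/*
 * Asynchronous statfs: if the cached obd_osfs is newer than @max_age it
 * is returned directly (tagged OBD_STATFS_FROM_CACHE); otherwise an
 * OST_STATFS request is queued on @rqset and osc_statfs_interpret()
 * delivers the result.  A hypothetical caller sketch (the callback and
 * buffer names are illustrative, not from this file):
 *
 *	struct obd_info oinfo = {
 *		.oi_osfs  = &my_osfs,
 *		.oi_cb_up = my_statfs_done,
 *	};
 *
 *	rc = osc_statfs_async(exp, &oinfo, ktime_get_seconds() - 1, rqset);
 *
 * would accept cached data at most one second old.
 */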
3029 static int osc_statfs_async(struct obd_export *exp,
3030                             struct obd_info *oinfo, time64_t max_age,
3031                             struct ptlrpc_request_set *rqset)
3032 {
3033         struct obd_device     *obd = class_exp2obd(exp);
3034         struct ptlrpc_request *req;
3035         struct osc_async_args *aa;
3036         int rc;
3037         ENTRY;
3038
3039         if (obd->obd_osfs_age >= max_age) {
3040                 CDEBUG(D_SUPER,
3041                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3042                        obd->obd_name, &obd->obd_osfs,
3043                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3044                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3045                 spin_lock(&obd->obd_osfs_lock);
3046                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3047                 spin_unlock(&obd->obd_osfs_lock);
3048                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3049                 if (oinfo->oi_cb_up)
3050                         oinfo->oi_cb_up(oinfo, 0);
3051
3052                 RETURN(0);
3053         }
3054
3055         /* We could possibly pass max_age in the request (as an absolute
3056          * timestamp or a "seconds.usec ago") so the target can avoid doing
3057          * extra calls into the filesystem if that isn't necessary (e.g.
3058          * during mount that would help a bit).  Having relative timestamps
3059          * is not so great if request processing is slow, while absolute
3060          * timestamps are not ideal because they need time synchronization. */
3061         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3062         if (req == NULL)
3063                 RETURN(-ENOMEM);
3064
3065         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3066         if (rc) {
3067                 ptlrpc_request_free(req);
3068                 RETURN(rc);
3069         }
3070         ptlrpc_request_set_replen(req);
3071         req->rq_request_portal = OST_CREATE_PORTAL;
3072         ptlrpc_at_set_req_timeout(req);
3073
3074         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3075                 /* procfs requests should not block waiting for the OST, to avoid deadlock */
3076                 req->rq_no_resend = 1;
3077                 req->rq_no_delay = 1;
3078         }
3079
3080         req->rq_interpret_reply = osc_statfs_interpret;
3081         aa = ptlrpc_req_async_args(aa, req);
3082         aa->aa_oi = oinfo;
3083
3084         ptlrpc_set_add_req(rqset, req);
3085         RETURN(0);
3086 }
3087
3088 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3089                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3090 {
3091         struct obd_device     *obd = class_exp2obd(exp);
3092         struct obd_statfs     *msfs;
3093         struct ptlrpc_request *req;
3094         struct obd_import     *imp, *imp0;
3095         int rc;
3096         ENTRY;
3097
3098         /* Since the request might also come from lprocfs, we need to
3099          * sync this with client_disconnect_export(). Bug15684
3100          */
3101         with_imp_locked(obd, imp0, rc)
3102                 imp = class_import_get(imp0);
3103         if (rc)
3104                 RETURN(rc);
3105
3106         /* We could possibly pass max_age in the request (as an absolute
3107          * timestamp or a "seconds.usec ago") so the target can avoid doing
3108          * extra calls into the filesystem if that isn't necessary (e.g.
3109          * during mount that would help a bit).  Having relative timestamps
3110          * is not so great if request processing is slow, while absolute
3111          * timestamps are not ideal because they need time synchronization. */
3112         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3113
3114         class_import_put(imp);
3115
3116         if (req == NULL)
3117                 RETURN(-ENOMEM);
3118
3119         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3120         if (rc) {
3121                 ptlrpc_request_free(req);
3122                 RETURN(rc);
3123         }
3124         ptlrpc_request_set_replen(req);
3125         req->rq_request_portal = OST_CREATE_PORTAL;
3126         ptlrpc_at_set_req_timeout(req);
3127
3128         if (flags & OBD_STATFS_NODELAY) {
3129                 /* procfs requests should not block waiting for the OST, to avoid deadlock */
3130                 req->rq_no_resend = 1;
3131                 req->rq_no_delay = 1;
3132         }
3133
3134         rc = ptlrpc_queue_wait(req);
3135         if (rc)
3136                 GOTO(out, rc);
3137
3138         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3139         if (msfs == NULL)
3140                 GOTO(out, rc = -EPROTO);
3141
3142         *osfs = *msfs;
3143
3144         EXIT;
3145 out:
3146         ptlrpc_req_finished(req);
3147         return rc;
3148 }
3149
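/*
 * Handle OSC-specific ioctls.  Only OBD_IOC_CLIENT_RECOVER (force import
 * recovery against a given NID) and IOC_OSC_SET_ACTIVE (activate or
 * deactivate the import) are handled here; everything else is -ENOTTY.
 */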
3150 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3151                          void *karg, void __user *uarg)
3152 {
3153         struct obd_device *obd = exp->exp_obd;
3154         struct obd_ioctl_data *data = karg;
3155         int rc = 0;
3156
3157         ENTRY;
3158         if (!try_module_get(THIS_MODULE)) {
3159                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3160                        module_name(THIS_MODULE));
3161                 return -EINVAL;
3162         }
3163         switch (cmd) {
3164         case OBD_IOC_CLIENT_RECOVER:
3165                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3166                                            data->ioc_inlbuf1, 0);
3167                 if (rc > 0)
3168                         rc = 0;
3169                 break;
3170         case IOC_OSC_SET_ACTIVE:
3171                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3172                                               data->ioc_offset);
3173                 break;
3174         default:
3175                 rc = -ENOTTY;
3176                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3177                        obd->obd_name, cmd, current->comm, rc);
3178                 break;
3179         }
3180
3181         module_put(THIS_MODULE);
3182         return rc;
3183 }
3184
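/*
 * Set a named parameter, either locally or on the OST.  KEY_CHECKSUM,
 * KEY_SPTLRPC_CONF, KEY_FLUSH_CTX and KEY_CACHE_LRU_SHRINK are handled
 * entirely on the client; all other keys are packed into an OST_SET_INFO
 * request (using the RQF_OST_SET_GRANT_INFO format for KEY_GRANT_SHRINK).
 */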
3185 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3186                        u32 keylen, void *key, u32 vallen, void *val,
3187                        struct ptlrpc_request_set *set)
3188 {
3189         struct ptlrpc_request *req;
3190         struct obd_device     *obd = exp->exp_obd;
3191         struct obd_import     *imp = class_exp2cliimp(exp);
3192         char                  *tmp;
3193         int                    rc;
3194         ENTRY;
3195
3196         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3197
3198         if (KEY_IS(KEY_CHECKSUM)) {
3199                 if (vallen != sizeof(int))
3200                         RETURN(-EINVAL);
3201                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3202                 RETURN(0);
3203         }
3204
3205         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3206                 sptlrpc_conf_client_adapt(obd);
3207                 RETURN(0);
3208         }
3209
3210         if (KEY_IS(KEY_FLUSH_CTX)) {
3211                 sptlrpc_import_flush_my_ctx(imp);
3212                 RETURN(0);
3213         }
3214
3215         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3216                 struct client_obd *cli = &obd->u.cli;
3217                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3218                 long target = *(long *)val;
3219
3220                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3221                 *(long *)val -= nr;
3222                 RETURN(0);
3223         }
3224
3225         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3226                 RETURN(-EINVAL);
3227
3228         /* We pass all other commands directly to the OST. Since nobody calls
3229            osc methods directly and everybody is supposed to go through LOV, we
3230            assume LOV checked for invalid values for us.
3231            The only recognised values so far are evict_by_nid and mds_conn.
3232            Even if something bad goes through, we'd get a -EINVAL from the OST
3233            anyway. */
3234
3235         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3236                                                 &RQF_OST_SET_GRANT_INFO :
3237                                                 &RQF_OBD_SET_INFO);
3238         if (req == NULL)
3239                 RETURN(-ENOMEM);
3240
3241         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3242                              RCL_CLIENT, keylen);
3243         if (!KEY_IS(KEY_GRANT_SHRINK))
3244                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3245                                      RCL_CLIENT, vallen);
3246         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3247         if (rc) {
3248                 ptlrpc_request_free(req);
3249                 RETURN(rc);
3250         }
3251
3252         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3253         memcpy(tmp, key, keylen);
3254         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3255                                                         &RMF_OST_BODY :
3256                                                         &RMF_SETINFO_VAL);
3257         memcpy(tmp, val, vallen);
3258
3259         if (KEY_IS(KEY_GRANT_SHRINK)) {
3260                 struct osc_grant_args *aa;
3261                 struct obdo *oa;
3262
3263                 aa = ptlrpc_req_async_args(aa, req);
3264                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3265                 if (!oa) {
3266                         ptlrpc_req_finished(req);
3267                         RETURN(-ENOMEM);
3268                 }
3269                 *oa = ((struct ost_body *)val)->oa;
3270                 aa->aa_oa = oa;
3271                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3272         }
3273
3274         ptlrpc_request_set_replen(req);
3275         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3276                 LASSERT(set != NULL);
3277                 ptlrpc_set_add_req(set, req);
3278                 ptlrpc_check_set(NULL, set);
3279         } else {
3280                 ptlrpcd_add_req(req);
3281         }
3282
3283         RETURN(0);
3284 }
3285 EXPORT_SYMBOL(osc_set_info_async);
3286
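/*
 * On reconnect, ask the server to honour the grant this client still
 * holds: available + reserved grant plus the grant consumed by dirty
 * pages (cl_dirty_grant when OBD_CONNECT_GRANT_PARAM is negotiated,
 * otherwise cl_dirty_pages << PAGE_SHIFT).  If that sum is zero,
 * request twice the BRW size as a default.
 */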
3287 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3288                   struct obd_device *obd, struct obd_uuid *cluuid,
3289                   struct obd_connect_data *data, void *localdata)
3290 {
3291         struct client_obd *cli = &obd->u.cli;
3292
3293         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3294                 long lost_grant;
3295                 long grant;
3296
3297                 spin_lock(&cli->cl_loi_list_lock);
3298                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3299                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3300                         /* restore ocd_grant_blkbits as client page bits */
3301                         data->ocd_grant_blkbits = PAGE_SHIFT;
3302                         grant += cli->cl_dirty_grant;
3303                 } else {
3304                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3305                 }
3306                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3307                 lost_grant = cli->cl_lost_grant;
3308                 cli->cl_lost_grant = 0;
3309                 spin_unlock(&cli->cl_loi_list_lock);
3310
3311                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3312                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3313                        data->ocd_version, data->ocd_grant, lost_grant);
3314         }
3315
3316         RETURN(0);
3317 }
3318 EXPORT_SYMBOL(osc_reconnect);
3319
3320 int osc_disconnect(struct obd_export *exp)
3321 {
3322         struct obd_device *obd = class_exp2obd(exp);
3323         int rc;
3324
3325         rc = client_disconnect_export(exp);
3326         /**
3327          * Initially we put del_shrink_grant before disconnect_export, but it
3328          * causes the following problem if setup (connect) and cleanup
3329          * (disconnect) are tangled together.
3330          *      connect p1                     disconnect p2
3331          *   ptlrpc_connect_import
3332          *     ...............               class_manual_cleanup
3333          *                                     osc_disconnect
3334          *                                     del_shrink_grant
3335          *   ptlrpc_connect_interrupt
3336          *     osc_init_grant
3337          *   add this client to shrink list
3338          *                                      cleanup_osc
3339          * Bang! The grant shrink thread triggers the shrink. BUG18662
3340          */
3341         osc_del_grant_list(&obd->u.cli);
3342         return rc;
3343 }
3344 EXPORT_SYMBOL(osc_disconnect);
3345
3346 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3347                                  struct hlist_node *hnode, void *arg)
3348 {
3349         struct lu_env *env = arg;
3350         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3351         struct ldlm_lock *lock;
3352         struct osc_object *osc = NULL;
3353         ENTRY;
3354
3355         lock_res(res);
3356         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3357                 if (lock->l_ast_data != NULL && osc == NULL) {
3358                         osc = lock->l_ast_data;
3359                         cl_object_get(osc2cl(osc));
3360                 }
3361
3362                 /* clear the LDLM_FL_CLEANED flag to make sure the lock will
3363                  * be canceled by the 2nd round of the ldlm_namespace_cleanup()
3364                  * call in osc_import_event(). */
3365                 ldlm_clear_cleaned(lock);
3366         }
3367         unlock_res(res);
3368
3369         if (osc != NULL) {
3370                 osc_object_invalidate(env, osc);
3371                 cl_object_put(env, osc2cl(osc));
3372         }
3373
3374         RETURN(0);
3375 }
3376 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3377
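/*
 * React to import state changes: on disconnect the remembered grant is
 * zeroed; on invalidation all cached locks are cleaned up and every osc
 * object attached to them is invalidated; on OCD the negotiated grant
 * and request portal are (re)applied; the remaining events are relayed
 * to the observer (typically the LOV above us).
 */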
3378 static int osc_import_event(struct obd_device *obd,
3379                             struct obd_import *imp,
3380                             enum obd_import_event event)
3381 {
3382         struct client_obd *cli;
3383         int rc = 0;
3384
3385         ENTRY;
3386         LASSERT(imp->imp_obd == obd);
3387
3388         switch (event) {
3389         case IMP_EVENT_DISCON: {
3390                 cli = &obd->u.cli;
3391                 spin_lock(&cli->cl_loi_list_lock);
3392                 cli->cl_avail_grant = 0;
3393                 cli->cl_lost_grant = 0;
3394                 spin_unlock(&cli->cl_loi_list_lock);
3395                 break;
3396         }
3397         case IMP_EVENT_INACTIVE: {
3398                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3399                 break;
3400         }
3401         case IMP_EVENT_INVALIDATE: {
3402                 struct ldlm_namespace *ns = obd->obd_namespace;
3403                 struct lu_env         *env;
3404                 __u16                  refcheck;
3405
3406                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3407
3408                 env = cl_env_get(&refcheck);
3409                 if (!IS_ERR(env)) {
3410                         osc_io_unplug(env, &obd->u.cli, NULL);
3411
3412                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3413                                                  osc_ldlm_resource_invalidate,
3414                                                  env, 0);
3415                         cl_env_put(env, &refcheck);
3416
3417                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3418                 } else
3419                         rc = PTR_ERR(env);
3420                 break;
3421         }
3422         case IMP_EVENT_ACTIVE: {
3423                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3424                 break;
3425         }
3426         case IMP_EVENT_OCD: {
3427                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3428
3429                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3430                         osc_init_grant(&obd->u.cli, ocd);
3431
3432                 /* See bug 7198 */
3433                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3434                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3435
3436                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3437                 break;
3438         }
3439         case IMP_EVENT_DEACTIVATE: {
3440                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3441                 break;
3442         }
3443         case IMP_EVENT_ACTIVATE: {
3444                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3445                 break;
3446         }
3447         default:
3448                 CERROR("Unknown import event %d\n", event);
3449                 LBUG();
3450         }
3451         RETURN(rc);
3452 }
3453
3454 /**
3455  * Determine whether the lock can be canceled before replaying it
3456  * during recovery; see bug16774 for detailed information.
3457  *
3458  * \retval zero the lock can't be canceled
3459  * \retval other ok to cancel
3460  */
3461 static int osc_cancel_weight(struct ldlm_lock *lock)
3462 {
3463         /*
3464          * Cancel all unused and granted extent locks.
3465          */
3466         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3467             ldlm_is_granted(lock) &&
3468             osc_ldlm_weigh_ast(lock) == 0)
3469                 RETURN(1);
3470
3471         RETURN(0);
3472 }
3473
3474 static int brw_queue_work(const struct lu_env *env, void *data)
3475 {
3476         struct client_obd *cli = data;
3477
3478         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3479
3480         osc_io_unplug(env, cli, NULL);
3481         RETURN(0);
3482 }
3483
3484 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3485 {
3486         struct client_obd *cli = &obd->u.cli;
3487         void *handler;
3488         int rc;
3489
3490         ENTRY;
3491
3492         rc = ptlrpcd_addref();
3493         if (rc)
3494                 RETURN(rc);
3495
3496         rc = client_obd_setup(obd, lcfg);
3497         if (rc)
3498                 GOTO(out_ptlrpcd, rc);
3499
3500
3501         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3502         if (IS_ERR(handler))
3503                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3504         cli->cl_writeback_work = handler;
3505
3506         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3507         if (IS_ERR(handler))
3508                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3509         cli->cl_lru_work = handler;
3510
3511         rc = osc_quota_setup(obd);
3512         if (rc)
3513                 GOTO(out_ptlrpcd_work, rc);
3514
3515         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3516         cli->cl_root_squash = 0;
3517         osc_update_next_shrink(cli);
3518
3519         RETURN(rc);
3520
3521 out_ptlrpcd_work:
3522         if (cli->cl_writeback_work != NULL) {
3523                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3524                 cli->cl_writeback_work = NULL;
3525         }
3526         if (cli->cl_lru_work != NULL) {
3527                 ptlrpcd_destroy_work(cli->cl_lru_work);
3528                 cli->cl_lru_work = NULL;
3529         }
3530         client_obd_cleanup(obd);
3531 out_ptlrpcd:
3532         ptlrpcd_decref();
3533         RETURN(rc);
3534 }
3535 EXPORT_SYMBOL(osc_setup_common);
3536
3537 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3538 {
3539         struct client_obd *cli = &obd->u.cli;
3540         int                adding;
3541         int                added;
3542         int                req_count;
3543         int                rc;
3544
3545         ENTRY;
3546
3547         rc = osc_setup_common(obd, lcfg);
3548         if (rc < 0)
3549                 RETURN(rc);
3550
3551         rc = osc_tunables_init(obd);
3552         if (rc)
3553                 RETURN(rc);
3554
3555         /*
3556          * We try to control the total number of requests with an upper limit
3557          * osc_reqpool_maxreqcount. There might be some race which will cause
3558          * over-limit allocation, but it is fine.
3559          */
3560         req_count = atomic_read(&osc_pool_req_count);
3561         if (req_count < osc_reqpool_maxreqcount) {
3562                 adding = cli->cl_max_rpcs_in_flight + 2;
3563                 if (req_count + adding > osc_reqpool_maxreqcount)
3564                         adding = osc_reqpool_maxreqcount - req_count;
3565
3566                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3567                 atomic_add(added, &osc_pool_req_count);
3568         }
3569
3570         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3571
3572         spin_lock(&osc_shrink_lock);
3573         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3574         spin_unlock(&osc_shrink_lock);
3575         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3576         cli->cl_import->imp_idle_debug = D_HA;
3577
3578         RETURN(0);
3579 }
3580
3581 int osc_precleanup_common(struct obd_device *obd)
3582 {
3583         struct client_obd *cli = &obd->u.cli;
3584         ENTRY;
3585
3586         /* LU-464
3587          * for echo client, export may be on zombie list, wait for
3588          * zombie thread to cull it, because cli.cl_import will be
3589          * cleared in client_disconnect_export():
3590          *   class_export_destroy() -> obd_cleanup() ->
3591          *   echo_device_free() -> echo_client_cleanup() ->
3592          *   obd_disconnect() -> osc_disconnect() ->
3593          *   client_disconnect_export()
3594          */
3595         obd_zombie_barrier();
3596         if (cli->cl_writeback_work) {
3597                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3598                 cli->cl_writeback_work = NULL;
3599         }
3600
3601         if (cli->cl_lru_work) {
3602                 ptlrpcd_destroy_work(cli->cl_lru_work);
3603                 cli->cl_lru_work = NULL;
3604         }
3605
3606         obd_cleanup_client_import(obd);
3607         RETURN(0);
3608 }
3609 EXPORT_SYMBOL(osc_precleanup_common);
3610
3611 static int osc_precleanup(struct obd_device *obd)
3612 {
3613         ENTRY;
3614
3615         osc_precleanup_common(obd);
3616
3617         ptlrpc_lprocfs_unregister_obd(obd);
3618         RETURN(0);
3619 }
3620
3621 int osc_cleanup_common(struct obd_device *obd)
3622 {
3623         struct client_obd *cli = &obd->u.cli;
3624         int rc;
3625
3626         ENTRY;
3627
3628         spin_lock(&osc_shrink_lock);
3629         list_del(&cli->cl_shrink_list);
3630         spin_unlock(&osc_shrink_lock);
3631
3632         /* lru cleanup */
3633         if (cli->cl_cache != NULL) {
3634                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3635                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3636                 list_del_init(&cli->cl_lru_osc);
3637                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3638                 cli->cl_lru_left = NULL;
3639                 cl_cache_decref(cli->cl_cache);
3640                 cli->cl_cache = NULL;
3641         }
3642
3643         /* free memory of osc quota cache */
3644         osc_quota_cleanup(obd);
3645
3646         rc = client_obd_cleanup(obd);
3647
3648         ptlrpcd_decref();
3649         RETURN(rc);
3650 }
3651 EXPORT_SYMBOL(osc_cleanup_common);
3652
3653 static const struct obd_ops osc_obd_ops = {
3654         .o_owner                = THIS_MODULE,
3655         .o_setup                = osc_setup,
3656         .o_precleanup           = osc_precleanup,
3657         .o_cleanup              = osc_cleanup_common,
3658         .o_add_conn             = client_import_add_conn,
3659         .o_del_conn             = client_import_del_conn,
3660         .o_connect              = client_connect_import,
3661         .o_reconnect            = osc_reconnect,
3662         .o_disconnect           = osc_disconnect,
3663         .o_statfs               = osc_statfs,
3664         .o_statfs_async         = osc_statfs_async,
3665         .o_create               = osc_create,
3666         .o_destroy              = osc_destroy,
3667         .o_getattr              = osc_getattr,
3668         .o_setattr              = osc_setattr,
3669         .o_iocontrol            = osc_iocontrol,
3670         .o_set_info_async       = osc_set_info_async,
3671         .o_import_event         = osc_import_event,
3672         .o_quotactl             = osc_quotactl,
3673 };
3674
3675 LIST_HEAD(osc_shrink_list);
3676 DEFINE_SPINLOCK(osc_shrink_lock);
3677
3678 #ifdef HAVE_SHRINKER_COUNT
3679 static struct shrinker osc_cache_shrinker = {
3680         .count_objects  = osc_cache_shrink_count,
3681         .scan_objects   = osc_cache_shrink_scan,
3682         .seeks          = DEFAULT_SEEKS,
3683 };
3684 #else
3685 static int osc_cache_shrink(struct shrinker *shrinker,
3686                             struct shrink_control *sc)
3687 {
3688         (void)osc_cache_shrink_scan(shrinker, sc);
3689
3690         return osc_cache_shrink_count(shrinker, sc);
3691 }
3692
3693 static struct shrinker osc_cache_shrinker = {
3694         .shrink   = osc_cache_shrink,
3695         .seeks    = DEFAULT_SEEKS,
3696 };
3697 #endif
3698
3699 static int __init osc_init(void)
3700 {
3701         unsigned int reqpool_size;
3702         unsigned int reqsize;
3703         int rc;
3704         ENTRY;
3705
3706         /* Print the address of _any_ initialized kernel symbol from this
3707          * module, to allow debugging with a gdb that doesn't support data
3708          * symbols from modules. */
3709         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3710
3711         rc = lu_kmem_init(osc_caches);
3712         if (rc)
3713                 RETURN(rc);
3714
3715         rc = class_register_type(&osc_obd_ops, NULL, true,
3716                                  LUSTRE_OSC_NAME, &osc_device_type);
3717         if (rc)
3718                 GOTO(out_kmem, rc);
3719
3720         rc = register_shrinker(&osc_cache_shrinker);
3721         if (rc)
3722                 GOTO(out_type, rc);
3723
3724         /* This is obviously too much memory; we only prevent overflow here */
3725         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3726                 GOTO(out_shrinker, rc = -EINVAL);
3727
3728         reqpool_size = osc_reqpool_mem_max << 20;
3729
3730         reqsize = 1;
3731         while (reqsize < OST_IO_MAXREQSIZE)
3732                 reqsize = reqsize << 1;
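        /*
         * reqsize is now the smallest power of two >= OST_IO_MAXREQSIZE, and
         * the pool is capped at reqpool_size / reqsize requests below.  For
         * example (illustrative values only): with osc_reqpool_mem_max set
         * to 5 (MB) and a request size rounded up to 1 MB, the pool would be
         * limited to (5 << 20) / (1 << 20) = 5 preallocated requests.
         */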
3733
3734         /*
3735          * We don't enlarge the request count in the OSC pool according to
3736          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3737          * after normal allocation has failed, so a small OSC pool won't
3738          * cause much performance degradation in most cases.
3739          */
3740         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3741
3742         atomic_set(&osc_pool_req_count, 0);
3743         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3744                                           ptlrpc_add_rqs_to_pool);
3745
3746         if (osc_rq_pool == NULL)
3747                 GOTO(out_shrinker, rc = -ENOMEM);
3748
3749         rc = osc_start_grant_work();
3750         if (rc != 0)
3751                 GOTO(out_req_pool, rc);
3752
3753         RETURN(rc);
3754
3755 out_req_pool:
3756         ptlrpc_free_rq_pool(osc_rq_pool);
3757 out_shrinker:
3758         unregister_shrinker(&osc_cache_shrinker);
3759 out_type:
3760         class_unregister_type(LUSTRE_OSC_NAME);
3761 out_kmem:
3762         lu_kmem_fini(osc_caches);
3763
3764         RETURN(rc);
3765 }
3766
3767 static void __exit osc_exit(void)
3768 {
3769         osc_stop_grant_work();
3770         unregister_shrinker(&osc_cache_shrinker);
3771         class_unregister_type(LUSTRE_OSC_NAME);
3772         lu_kmem_fini(osc_caches);
3773         ptlrpc_free_rq_pool(osc_rq_pool);
3774 }
3775
3776 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3777 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3778 MODULE_VERSION(LUSTRE_VERSION_STRING);
3779 MODULE_LICENSE("GPL");
3780
3781 module_init(osc_init);
3782 module_exit(osc_exit);