lustre/osc/osc_request.c (commit 7a3045b489f2f094a0711b3ade78e60cb2bcfeb4)

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

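/*
 * Grant-shrink RPCs reuse the BRW async-args slot (see
 * osc_shrink_grant_interpret() below), hence this alias.
 */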
#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* Do MDS-to-OST setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response; upcall and cookie may
 * also be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked once the RPC has been handled
 * @cookie:     Opaque caller data passed to @upcall
 * @mode:       Operation done on given range.
 *
 * Only block allocation (the standard preallocate operation) is currently
 * supported; other mode flags are not supported yet. ftruncate(2) and
 * truncate(2) are handled via a SETATTR request instead.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(imp, &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the number
 * of locks added to that list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This is different from the case when ELC is not supported at all,
         * where we still want to cancel locks in advance and just cancel
         * them locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

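/*
 * Optimistically bump the in-flight destroy counter and test it against
 * the RPC limit. On overshoot, undo the increment; if the counter moved
 * between the two atomic ops, a waiter may have been missed, so wake the
 * destroy waitqueue just in case.
 */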
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and the atomic_inc() are not covered
                 * by a lock, thus they may safely race and trip this
                 * CERROR() unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
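                /*
                 * Illustrative numbers (not from the source): with 4 KiB
                 * pages, cl_max_pages_per_rpc = 256 and
                 * cl_max_rpcs_in_flight = 8, nrpages = 256 * 9 = 2304 and
                 * undirty = 2304 << 12 = 9 MiB, before any extent tax.
                 */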
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
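        /*
         * Illustrative numbers (not from the source): 256 pages per RPC
         * and 8 RPCs in flight with 4 KiB pages gives a 9 MiB target;
         * if already at or below it, fall back to a single-RPC target.
         */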

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

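        /* the -5 below gives a few seconds of slack, presumably so the
         * check fires slightly early rather than sleeping just short of
         * the deadline (an assumption, not stated in the source) */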
        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

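/*
 * One pass over all registered clients: send at most
 * GRANT_SHRINK_RPC_BATCH shrink RPCs, track the earliest future
 * cl_next_shrink_grant, and reschedule the delayed work accordingly.
 */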
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}

/**
 * Start grant work for returning grant to server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we've
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as inflight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
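                /*
                 * Illustrative numbers (not from the source): with
                 * PAGE_SHIFT = 12 and ocd_grant_blkbits = 16, cl_chunkbits
                 * is 16, a chunk spans 16 pages, chunk_mask = ~15, and
                 * cl_max_pages_per_rpc is rounded up to a multiple of 16.
                 */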
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
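        /*
         * Illustrative capacity (not from the source): with 4 KiB pages and
         * 2-byte guard tags, up to 2048 tags are batched in the bounce page
         * before each hash update.
         */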
        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The remaining guard slots should be able to hold the
                 * checksums of a whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum)  \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}

static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                     u32 page_count, struct brw_page **pga,
                     struct ptlrpc_request **reqp, int resend)
{
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc, short_io_size = 0;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;
        struct brw_page *pg_prev;
        void *short_io_buf;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;
        struct inode *inode;
        bool directio = false;

        ENTRY;
        inode = page2inode(pga[0]->pg);
        if (inode == NULL) {
                /* Try to get reference to inode from cl_page if we are
                 * dealing with direct IO, as handled pages are not
                 * actual page cache pages.
                 */
                struct osc_async_page *oap = brw_page2oap(pga[0]);
                struct cl_page *clpage = oap2cl_page(oap);

                inode = clpage->cp_inode;
                if (inode)
                        directio = true;
        }
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                osc_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
                for (i = 0; i < page_count; i++) {
                        struct brw_page *pg = pga[i];
                        struct page *data_page = NULL;
                        bool retried = false;
                        bool lockedbymyself;
                        u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
                        struct address_space *map_orig = NULL;
                        pgoff_t index_orig;

retry_encrypt:
                        if (nunits & ~LUSTRE_ENCRYPTION_MASK)
                                nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
                                        LUSTRE_ENCRYPTION_UNIT_SIZE;
                        /* The page can already be locked when we arrive here.
                         * This is possible when cl_page_assume/vvp_page_assume
                         * is stuck on wait_on_page_writeback with page lock
                         * held. In this case there is no risk for the lock to
                         * be released while we are doing our encryption
                         * processing, because writeback against that page will
                         * end in vvp_page_completion_write/cl_page_completion,
                         * which means only once the page is fully processed.
                         */
                        lockedbymyself = trylock_page(pg->pg);
                        if (directio) {
                                map_orig = pg->pg->mapping;
                                pg->pg->mapping = inode->i_mapping;
                                index_orig = pg->pg->index;
                                pg->pg->index = pg->off >> PAGE_SHIFT;
                        }
                        data_page =
                                llcrypt_encrypt_pagecache_blocks(pg->pg,
                                                                 nunits, 0,
                                                                 GFP_NOFS);
                        if (directio) {
                                pg->pg->mapping = map_orig;
                                pg->pg->index = index_orig;
                        }
                        if (lockedbymyself)
                                unlock_page(pg->pg);
                        if (IS_ERR(data_page)) {
                                rc = PTR_ERR(data_page);
                                if (rc == -ENOMEM && !retried) {
                                        retried = true;
                                        rc = 0;
                                        goto retry_encrypt;
                                }
                                ptlrpc_request_free(req);
                                RETURN(rc);
                        }
                        /* Set PageChecked flag on bounce page for
                         * disambiguation in osc_release_bounce_pages().
                         */
                        SetPageChecked(data_page);
                        pg->pg = data_page;
1480                         /* there should be no gap in the middle of the page array */
1481                         if (i == page_count - 1) {
1482                                 struct osc_async_page *oap = brw_page2oap(pg);
1483
1484                                 oa->o_size = oap->oap_count +
1485                                         oap->oap_obj_off + oap->oap_page_off;
1486                         }
1487                         /* len is forced to nunits, and relative offset to 0
1488                          * so store the old, clear text info
1489                          */
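                        /* Worked example (illustrative only, assuming a 4 KiB
                         * encryption unit): a 100-byte fragment at in-page
                         * offset 10 gives nunits = 110, rounded up to 4096.
                         * After the adjustment below pg->count = 4096 and
                         * pg->off is page-aligned, while bp_count_diff = 3996
                         * and bp_off_diff = 10 preserve the clear text
                         * geometry until the RPC completes.
                         */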
1490                         pg->bp_count_diff = nunits - pg->count;
1491                         pg->count = nunits;
1492                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1493                         pg->off = pg->off & PAGE_MASK;
1494                 }
1495         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
1496                 for (i = 0; i < page_count; i++) {
1497                         struct brw_page *pg = pga[i];
1498                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1499
1500                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1501                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1502                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1503                         /* count/off are forced to cover the whole encryption
1504                          * unit size so that all encrypted data is stored on the
1505                          * OST, so adjust bp_{count,off}_diff for the size of
1506                          * the clear text.
1507                          */
1508                         pg->bp_count_diff = nunits - pg->count;
1509                         pg->count = nunits;
1510                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1511                         pg->off = pg->off & PAGE_MASK;
1512                 }
1513         }
1514
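        /* niocount is the number of contiguous chunks: pages that
         * can_merge_pages() reports as mergeable share one remote niobuf. */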
1515         for (niocount = i = 1; i < page_count; i++) {
1516                 if (!can_merge_pages(pga[i - 1], pga[i]))
1517                         niocount++;
1518         }
1519
1520         pill = &req->rq_pill;
1521         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1522                              sizeof(*ioobj));
1523         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1524                              niocount * sizeof(*niobuf));
1525
1526         for (i = 0; i < page_count; i++) {
1527                 short_io_size += pga[i]->count;
1528                 if (!inode || !IS_ENCRYPTED(inode)) {
1529                         pga[i]->bp_count_diff = 0;
1530                         pga[i]->bp_off_diff = 0;
1531                 }
1532         }
1533
1534         /* Check if read/write is small enough to be a short io. */
1535         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1536             !imp_connect_shortio(cli->cl_import))
1537                 short_io_size = 0;
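        /* With short io the payload travels inline in the RPC request/reply
         * message itself, so no bulk descriptor is set up below. */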
1538
1539         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1540                              opc == OST_READ ? 0 : short_io_size);
1541         if (opc == OST_READ)
1542                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1543                                      short_io_size);
1544
1545         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1546         if (rc) {
1547                 ptlrpc_request_free(req);
1548                 RETURN(rc);
1549         }
1550         osc_set_io_portal(req);
1551
1552         ptlrpc_at_set_req_timeout(req);
1553         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1554          * retry logic */
1555         req->rq_no_retry_einprogress = 1;
1556
1557         if (short_io_size != 0) {
1558                 desc = NULL;
1559                 short_io_buf = NULL;
1560                 goto no_bulk;
1561         }
1562
1563         desc = ptlrpc_prep_bulk_imp(req, page_count,
1564                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1565                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1566                         PTLRPC_BULK_PUT_SINK),
1567                 OST_BULK_PORTAL,
1568                 &ptlrpc_bulk_kiov_pin_ops);
1569
1570         if (desc == NULL)
1571                 GOTO(out, rc = -ENOMEM);
1572         /* NB request now owns desc and will free it when it gets freed */
1573 no_bulk:
1574         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1575         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1576         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1577         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1578
1579         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1580
1581         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1582          * and from_kgid(), because they are asynchronous. Fortunately, variable
1583          * oa contains valid o_uid and o_gid in these two operations.
1584          * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
1585          * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid breaking
1586          * other process logic. */
1587         body->oa.o_uid = oa->o_uid;
1588         body->oa.o_gid = oa->o_gid;
1589
1590         obdo_to_ioobj(oa, ioobj);
1591         ioobj->ioo_bufcnt = niocount;
1592         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1593          * of bulks that might be sent for this request.  The actual number is
1594          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1595          * sends "max - 1" for compatibility with old clients that send "0", and
1596          * also so that the actual maximum is a power of two, not one less. LU-1431 */
1597         if (desc != NULL)
1598                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1599         else /* short io */
1600                 ioobj_max_brw_set(ioobj, 0);
1601
1602         if (short_io_size != 0) {
1603                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1604                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1605                         body->oa.o_flags = 0;
1606                 }
1607                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1608                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1609                        short_io_size);
1610                 if (opc == OST_WRITE) {
1611                         short_io_buf = req_capsule_client_get(pill,
1612                                                               &RMF_SHORT_IO);
1613                         LASSERT(short_io_buf != NULL);
1614                 }
1615         }
1616
1617         LASSERT(page_count > 0);
1618         pg_prev = pga[0];
1619         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1620                 struct brw_page *pg = pga[i];
1621                 int poff = pg->off & ~PAGE_MASK;
1622
1623                 LASSERT(pg->count > 0);
1624                 /* make sure there is no gap in the middle of the page array */
1625                 LASSERTF(page_count == 1 ||
1626                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1627                           ergo(i > 0 && i < page_count - 1,
1628                                poff == 0 && pg->count == PAGE_SIZE)   &&
1629                           ergo(i == page_count - 1, poff == 0)),
1630                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1631                          i, page_count, pg, pg->off, pg->count);
1632                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1633                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1634                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1635                          i, page_count,
1636                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1637                          pg_prev->pg, page_private(pg_prev->pg),
1638                          pg_prev->pg->index, pg_prev->off);
1639                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1640                         (pg->flag & OBD_BRW_SRVLOCK));
1641                 if (short_io_size != 0 && opc == OST_WRITE) {
1642                         unsigned char *ptr = kmap_atomic(pg->pg);
1643
1644                         LASSERT(short_io_size >= requested_nob + pg->count);
1645                         memcpy(short_io_buf + requested_nob,
1646                                ptr + poff,
1647                                pg->count);
1648                         kunmap_atomic(ptr);
1649                 } else if (short_io_size == 0) {
1650                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1651                                                          pg->count);
1652                 }
1653                 requested_nob += pg->count;
1654
1655                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1656                         niobuf--;
1657                         niobuf->rnb_len += pg->count;
1658                 } else {
1659                         niobuf->rnb_offset = pg->off;
1660                         niobuf->rnb_len    = pg->count;
1661                         niobuf->rnb_flags  = pg->flag;
1662                 }
1663                 pg_prev = pg;
1664         }
1665
1666         LASSERTF((void *)(niobuf - niocount) ==
1667                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1668                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1669                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1670
1671         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
1672         if (resend) {
1673                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1674                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1675                         body->oa.o_flags = 0;
1676                 }
1677                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1678         }
1679
1680         if (osc_should_shrink_grant(cli))
1681                 osc_shrink_grant_local(cli, &body->oa);
1682
1683         /* size[REQ_REC_OFF] still sizeof (*body) */
1684         if (opc == OST_WRITE) {
1685                 if (cli->cl_checksum &&
1686                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1687                         /* store cl_cksum_type in a local variable since
1688                          * it can be changed via lprocfs */
1689                         enum cksum_types cksum_type = cli->cl_cksum_type;
1690
1691                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1692                                 body->oa.o_flags = 0;
1693
1694                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1695                                                                 cksum_type);
1696                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1697
1698                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1699                                                   requested_nob, page_count,
1700                                                   pga, OST_WRITE,
1701                                                   &body->oa.o_cksum);
1702                         if (rc < 0) {
1703                                 CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
1704                                        rc);
1705                                 GOTO(out, rc);
1706                         }
1707                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1708                                body->oa.o_cksum);
1709
1710                         /* save this in 'oa', too, for later checking */
1711                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1712                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1713                                                            cksum_type);
1714                 } else {
1715                         /* clear out the checksum flag, in case this is a
1716                          * resend but cl_checksum is no longer set. b=11238 */
1717                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1718                 }
1719                 oa->o_cksum = body->oa.o_cksum;
1720                 /* 1 RC per niobuf */
1721                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1722                                      sizeof(__u32) * niocount);
1723         } else {
1724                 if (cli->cl_checksum &&
1725                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1726                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1727                                 body->oa.o_flags = 0;
1728                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1729                                 cli->cl_cksum_type);
1730                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1731                 }
1732
1733         /* The client cksum has already been copied to the wire obdo in the
1734          * previous lustre_set_wire_obdo(), and in case a bulk read is being
1735          * resent due to a cksum error, this will allow the server to
1736          * check+dump the pages on its side. */
1737         }
1738         ptlrpc_request_set_replen(req);
1739
1740         aa = ptlrpc_req_async_args(aa, req);
1741         aa->aa_oa = oa;
1742         aa->aa_requested_nob = requested_nob;
1743         aa->aa_nio_count = niocount;
1744         aa->aa_page_count = page_count;
1745         aa->aa_resends = 0;
1746         aa->aa_ppga = pga;
1747         aa->aa_cli = cli;
1748         INIT_LIST_HEAD(&aa->aa_oaps);
1749
1750         *reqp = req;
1751         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1752         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1753                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1754                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1755         RETURN(0);
1756
1757  out:
1758         ptlrpc_req_finished(req);
1759         RETURN(rc);
1760 }
1761
1762 char dbgcksum_file_name[PATH_MAX];
1763
1764 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1765                                 struct brw_page **pga, __u32 server_cksum,
1766                                 __u32 client_cksum)
1767 {
1768         struct file *filp;
1769         int rc, i;
1770         unsigned int len;
1771         char *buf;
1772
1773         /* Only keep a dump of the pages for the first error on a given
1774          * file/fid range, not for the subsequent resends/retries. */
1775         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1776                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1777                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1778                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1779                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1780                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1781                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1782                  pga[0]->off,
1783                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1784                  client_cksum, server_cksum);
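        /* A resulting name might look like (hypothetical values):
         * /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x1:0x0]:[0-1048575]-a1b2c3d4-e5f6a7b8
         */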
1785         filp = filp_open(dbgcksum_file_name,
1786                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1787         if (IS_ERR(filp)) {
1788                 rc = PTR_ERR(filp);
1789                 if (rc == -EEXIST)
1790                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1791                                "checksum error: rc = %d\n", dbgcksum_file_name,
1792                                rc);
1793                 else
1794                         CERROR("%s: can't open to dump pages with checksum "
1795                                "error: rc = %d\n", dbgcksum_file_name, rc);
1796                 return;
1797         }
1798
1799         for (i = 0; i < page_count; i++) {
1800                 len = pga[i]->count;
1801                 buf = kmap(pga[i]->pg);
1802                 while (len != 0) {
1803                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1804                         if (rc < 0) {
1805                                 CERROR("%s: wanted to write %u but got %d "
1806                                        "error\n", dbgcksum_file_name, len, rc);
1807                                 break;
1808                         }
1809                         len -= rc;
1810                         buf += rc;
1811                         CDEBUG(D_INFO, "%s: wrote %d bytes\n",
1812                                dbgcksum_file_name, rc);
1813                 }
1814                 kunmap(pga[i]->pg);
1815         }
1816
1817         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1818         if (rc)
1819                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1820         filp_close(filp, NULL);
1821 }
1822
1823 static int
1824 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1825                      __u32 client_cksum, __u32 server_cksum,
1826                      struct osc_brw_async_args *aa)
1827 {
1828         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1829         enum cksum_types cksum_type;
1830         obd_dif_csum_fn *fn = NULL;
1831         int sector_size = 0;
1832         __u32 new_cksum;
1833         char *msg;
1834         int rc;
1835
1836         if (server_cksum == client_cksum) {
1837                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1838                 return 0;
1839         }
1840
1841         if (aa->aa_cli->cl_checksum_dump)
1842                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1843                                     server_cksum, client_cksum);
1844
1845         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1846                                            oa->o_flags : 0);
1847
1848         switch (cksum_type) {
1849         case OBD_CKSUM_T10IP512:
1850                 fn = obd_dif_ip_fn;
1851                 sector_size = 512;
1852                 break;
1853         case OBD_CKSUM_T10IP4K:
1854                 fn = obd_dif_ip_fn;
1855                 sector_size = 4096;
1856                 break;
1857         case OBD_CKSUM_T10CRC512:
1858                 fn = obd_dif_crc_fn;
1859                 sector_size = 512;
1860                 break;
1861         case OBD_CKSUM_T10CRC4K:
1862                 fn = obd_dif_crc_fn;
1863                 sector_size = 4096;
1864                 break;
1865         default:
1866                 break;
1867         }
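        /* fn stays NULL for the non-T10 checksum types (e.g. crc32, adler,
         * crc32c), which are handled by osc_checksum_bulk() below. */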
1868
1869         if (fn)
1870                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1871                                              aa->aa_page_count, aa->aa_ppga,
1872                                              OST_WRITE, fn, sector_size,
1873                                              &new_cksum);
1874         else
1875                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1876                                        aa->aa_ppga, OST_WRITE, cksum_type,
1877                                        &new_cksum);
1878
1879         if (rc < 0)
1880                 msg = "failed to calculate the client write checksum";
1881         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1882                 msg = "the server did not use the checksum type specified in "
1883                       "the original request - likely a protocol problem";
1884         else if (new_cksum == server_cksum)
1885                 msg = "changed on the client after we checksummed it - "
1886                       "likely false positive due to mmap IO (bug 11742)";
1887         else if (new_cksum == client_cksum)
1888                 msg = "changed in transit before arrival at OST";
1889         else
1890                 msg = "changed in transit AND doesn't match the original - "
1891                       "likely false positive due to mmap IO (bug 11742)";
1892
1893         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1894                            DFID " object "DOSTID" extent [%llu-%llu], original "
1895                            "client csum %x (type %x), server csum %x (type %x),"
1896                            " client csum now %x\n",
1897                            obd_name, msg, libcfs_nid2str(peer->nid),
1898                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1899                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1900                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1901                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1902                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1903                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1904                            client_cksum,
1905                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1906                            server_cksum, cksum_type, new_cksum);
1907         return 1;
1908 }
1909
1910 /* Note rc enters this function as the number of bytes transferred */
1911 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1912 {
1913         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1914         struct client_obd *cli = aa->aa_cli;
1915         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1916         const struct lnet_process_id *peer =
1917                 &req->rq_import->imp_connection->c_peer;
1918         struct ost_body *body;
1919         u32 client_cksum = 0;
1920         struct inode *inode;
1921         unsigned int blockbits = 0, blocksize = 0;
1922
1923         ENTRY;
1924
1925         if (rc < 0 && rc != -EDQUOT) {
1926                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1927                 RETURN(rc);
1928         }
1929
1930         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1931         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1932         if (body == NULL) {
1933                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1934                 RETURN(-EPROTO);
1935         }
1936
1937         /* set/clear over quota flag for a uid/gid/projid */
1938         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1939             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1940                 unsigned qid[LL_MAXQUOTAS] = {
1941                                          body->oa.o_uid, body->oa.o_gid,
1942                                          body->oa.o_projid };
1943                 CDEBUG(D_QUOTA,
1944                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1945                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1946                        body->oa.o_valid, body->oa.o_flags);
1947                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1948                                 body->oa.o_flags);
1949         }
1950
1951         osc_update_grant(cli, body);
1952
1953         if (rc < 0)
1954                 RETURN(rc);
1955
1956         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1957                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1958
1959         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1960                 if (rc > 0) {
1961                         CERROR("%s: unexpected positive size %d\n",
1962                                obd_name, rc);
1963                         RETURN(-EPROTO);
1964                 }
1965
1966                 if (req->rq_bulk != NULL &&
1967                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1968                         RETURN(-EAGAIN);
1969
1970                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1971                     check_write_checksum(&body->oa, peer, client_cksum,
1972                                          body->oa.o_cksum, aa))
1973                         RETURN(-EAGAIN);
1974
1975                 rc = check_write_rcs(req, aa->aa_requested_nob,
1976                                      aa->aa_nio_count, aa->aa_page_count,
1977                                      aa->aa_ppga);
1978                 GOTO(out, rc);
1979         }
1980
1981         /* The rest of this function executes only for OST_READs */
1982
1983         if (req->rq_bulk == NULL) {
1984                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
1985                                           RCL_SERVER);
1986                 LASSERT(rc == req->rq_status);
1987         } else {
1988                 /* if unwrap_bulk failed, return -EAGAIN to retry */
1989                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1990         }
1991         if (rc < 0)
1992                 GOTO(out, rc = -EAGAIN);
1993
1994         if (rc > aa->aa_requested_nob) {
1995                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
1996                        rc, aa->aa_requested_nob);
1997                 RETURN(-EPROTO);
1998         }
1999
2000         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2001                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2002                        rc, req->rq_bulk->bd_nob_transferred);
2003                 RETURN(-EPROTO);
2004         }
2005
2006         if (req->rq_bulk == NULL) {
2007                 /* short io */
2008                 int nob, pg_count, i = 0;
2009                 unsigned char *buf;
2010
2011                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2012                 pg_count = aa->aa_page_count;
2013                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2014                                                    rc);
2015                 nob = rc;
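                /* Distribute the inline reply buffer across the brw pages;
                 * nob tracks how many bytes remain to be copied. */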
2016                 while (nob > 0 && pg_count > 0) {
2017                         unsigned char *ptr;
2018                         int count = aa->aa_ppga[i]->count > nob ?
2019                                     nob : aa->aa_ppga[i]->count;
2020
2021                         CDEBUG(D_CACHE, "page %p count %d\n",
2022                                aa->aa_ppga[i]->pg, count);
2023                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2024                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2025                                count);
2026                         kunmap_atomic((void *) ptr);
2027
2028                         buf += count;
2029                         nob -= count;
2030                         i++;
2031                         pg_count--;
2032                 }
2033         }
2034
2035         if (rc < aa->aa_requested_nob)
2036                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2037
2038         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2039                 static int cksum_counter;
2040                 u32        server_cksum = body->oa.o_cksum;
2041                 char      *via = "";
2042                 char      *router = "";
2043                 enum cksum_types cksum_type;
2044                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2045                         body->oa.o_flags : 0;
2046
2047                 cksum_type = obd_cksum_type_unpack(o_flags);
2048                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
2049                                           aa->aa_page_count, aa->aa_ppga,
2050                                           OST_READ, &client_cksum);
2051                 if (rc < 0)
2052                         GOTO(out, rc);
2053
2054                 if (req->rq_bulk != NULL &&
2055                     peer->nid != req->rq_bulk->bd_sender) {
2056                         via = " via ";
2057                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2058                 }
2059
2060                 if (server_cksum != client_cksum) {
2061                         struct ost_body *clbody;
2062                         u32 page_count = aa->aa_page_count;
2063
2064                         clbody = req_capsule_client_get(&req->rq_pill,
2065                                                         &RMF_OST_BODY);
2066                         if (cli->cl_checksum_dump)
2067                                 dump_all_bulk_pages(&clbody->oa, page_count,
2068                                                     aa->aa_ppga, server_cksum,
2069                                                     client_cksum);
2070
2071                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2072                                            "%s%s%s inode "DFID" object "DOSTID
2073                                            " extent [%llu-%llu], client %x, "
2074                                            "server %x, cksum_type %x\n",
2075                                            obd_name,
2076                                            libcfs_nid2str(peer->nid),
2077                                            via, router,
2078                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2079                                                 clbody->oa.o_parent_seq : 0ULL,
2080                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2081                                                 clbody->oa.o_parent_oid : 0,
2082                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2083                                                 clbody->oa.o_parent_ver : 0,
2084                                            POSTID(&body->oa.o_oi),
2085                                            aa->aa_ppga[0]->off,
2086                                            aa->aa_ppga[page_count-1]->off +
2087                                            aa->aa_ppga[page_count-1]->count - 1,
2088                                            client_cksum, server_cksum,
2089                                            cksum_type);
2090                         cksum_counter = 0;
2091                         aa->aa_oa->o_cksum = client_cksum;
2092                         rc = -EAGAIN;
2093                 } else {
2094                         cksum_counter++;
2095                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2096                         rc = 0;
2097                 }
2098         } else if (unlikely(client_cksum)) {
2099                 static int cksum_missed;
2100
2101                 cksum_missed++;
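                /* (x & -x) == x only when x is a power of two, so this
                 * message is logged with exponential backoff: on the 1st,
                 * 2nd, 4th, 8th, ... missed checksum. */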
2102                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2103                         CERROR("%s: checksum %u requested from %s but not sent\n",
2104                                obd_name, cksum_missed,
2105                                libcfs_nid2str(peer->nid));
2106         } else {
2107                 rc = 0;
2108         }
2109
2110         inode = page2inode(aa->aa_ppga[0]->pg);
2111         if (inode == NULL) {
2112                 /* Try to get reference to inode from cl_page if we are
2113                  * dealing with direct IO, as handled pages are not
2114                  * actual page cache pages.
2115                  */
2116                 struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
2117
2118                 inode = oap2cl_page(oap)->cp_inode;
2119                 if (inode) {
2120                         blockbits = inode->i_blkbits;
2121                         blocksize = 1 << blockbits;
2122                 }
2123         }
2124         if (inode && IS_ENCRYPTED(inode)) {
2125                 int idx;
2126
2127                 if (!llcrypt_has_encryption_key(inode)) {
2128                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2129                         GOTO(out, rc);
2130                 }
2131                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2132                         struct brw_page *pg = aa->aa_ppga[idx];
2133                         unsigned int offs = 0;
2134
2135                         while (offs < PAGE_SIZE) {
2136                                 /* do not decrypt if page is all 0s */
2137                                 if (memchr_inv(page_address(pg->pg) + offs, 0,
2138                                          LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2139                                         /* if the page is empty, tell upper
2140                                          * layers (ll_io_zero_page) by
2141                                          * clearing PagePrivate2
2142                                          */
2143                                         if (!offs)
2144                                                 ClearPagePrivate2(pg->pg);
2145                                         break;
2146                                 }
2147
2148                                 if (blockbits) {
2149                                         /* This is the direct IO case. Call
2150                                          * the decrypt function that takes the
2151                                          * inode as input parameter directly.
2152                                          * The page does not need to be locked.
2153                                          */
2154                                         u64 lblk_num =
2155                                                 ((u64)(pg->off >> PAGE_SHIFT) <<
2156                                                      (PAGE_SHIFT - blockbits)) +
2157                                                        (offs >> blockbits);
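                                        /* e.g. (illustrative) with 4 KiB pages
                                         * and 1 KiB blocks (blockbits = 10),
                                         * pg->off = 8192 and offs = 2048 give
                                         * lblk_num = 2 * 4 + 2 = 10 */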
2158                                         unsigned int i;
2159
2160                                         for (i = offs;
2161                                              i < offs +
2162                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2163                                              i += blocksize, lblk_num++) {
2164                                                 rc =
2165                                                   llcrypt_decrypt_block_inplace(
2166                                                           inode, pg->pg,
2167                                                           blocksize, i,
2168                                                           lblk_num);
2169                                                 if (rc)
2170                                                         break;
2171                                         }
2172                                 } else {
2173                                         rc = llcrypt_decrypt_pagecache_blocks(
2174                                                 pg->pg,
2175                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2176                                                 offs);
2177                                 }
2178                                 if (rc)
2179                                         GOTO(out, rc);
2180
2181                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2182                         }
2183                 }
2184         }
2185
2186 out:
2187         if (rc >= 0)
2188                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2189                                      aa->aa_oa, &body->oa);
2190
2191         RETURN(rc);
2192 }
2193
2194 static int osc_brw_redo_request(struct ptlrpc_request *request,
2195                                 struct osc_brw_async_args *aa, int rc)
2196 {
2197         struct ptlrpc_request *new_req;
2198         struct osc_brw_async_args *new_aa;
2199         struct osc_async_page *oap;
2200         ENTRY;
2201
2202         /* The message below is checked in replay-ost-single.sh test_8ae */
2203         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2204                   "redo for recoverable error %d", rc);
2205
2206         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2207                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2208                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2209                                   aa->aa_ppga, &new_req, 1);
2210         if (rc)
2211                 RETURN(rc);
2212
2213         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2214                 if (oap->oap_request != NULL) {
2215                         LASSERTF(request == oap->oap_request,
2216                                  "request %p != oap_request %p\n",
2217                                  request, oap->oap_request);
2218                 }
2219         }
2220         /*
2221          * The new request takes over pga and oaps from the old request.
2222          * Note that copying a list_head doesn't work; we need to move it...
2223          */
2224         aa->aa_resends++;
2225         new_req->rq_interpret_reply = request->rq_interpret_reply;
2226         new_req->rq_async_args = request->rq_async_args;
2227         new_req->rq_commit_cb = request->rq_commit_cb;
2228         /* cap the resend delay to the current request timeout; this is similar
2229          * to what ptlrpc does (see after_reply()) */
2230         if (aa->aa_resends > new_req->rq_timeout)
2231                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2232         else
2233                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
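        /* e.g. with rq_timeout == 10, the 3rd resend is delayed 3 seconds,
         * while from the 10th resend on the delay stays capped at 10. */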
2234         new_req->rq_generation_set = 1;
2235         new_req->rq_import_generation = request->rq_import_generation;
2236
2237         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2238
2239         INIT_LIST_HEAD(&new_aa->aa_oaps);
2240         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2241         INIT_LIST_HEAD(&new_aa->aa_exts);
2242         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2243         new_aa->aa_resends = aa->aa_resends;
2244
2245         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2246                 if (oap->oap_request) {
2247                         ptlrpc_req_finished(oap->oap_request);
2248                         oap->oap_request = ptlrpc_request_addref(new_req);
2249                 }
2250         }
2251
2252         /* XXX: This code will run into problems if we ever support adding
2253          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2254          * waiting for all of them to finish. We should inherit the request
2255          * set from the old request. */
2256         ptlrpcd_add_req(new_req);
2257
2258         DEBUG_REQ(D_INFO, new_req, "new request");
2259         RETURN(0);
2260 }
2261
2262 /*
2263  * Ugh, we want disk allocation on the target to happen in offset order.  We'll
2264  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2265  * fine for our small page arrays and doesn't require allocation.  It's an
2266  * insertion sort that swaps elements that are strides apart, shrinking the
2267  * stride down until it's '1' and the array is sorted.
2268  */
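/* For example (illustrative), with num = 100 the gap-building loop below
 * produces 1, 4, 13, 40, 121 (Knuth's 3h+1 series), and the sorting passes
 * then run with strides 40, 13, 4 and 1. */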
2269 static void sort_brw_pages(struct brw_page **array, int num)
2270 {
2271         int stride, i, j;
2272         struct brw_page *tmp;
2273
2274         if (num == 1)
2275                 return;
2276         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2277                 ;
2278
2279         do {
2280                 stride /= 3;
2281                 for (i = stride ; i < num ; i++) {
2282                         tmp = array[i];
2283                         j = i;
2284                         while (j >= stride && array[j - stride]->off > tmp->off) {
2285                                 array[j] = array[j - stride];
2286                                 j -= stride;
2287                         }
2288                         array[j] = tmp;
2289                 }
2290         } while (stride > 1);
2291 }
2292
2293 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2294 {
2295         LASSERT(ppga != NULL);
2296         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2297 }
2298
2299 static int brw_interpret(const struct lu_env *env,
2300                          struct ptlrpc_request *req, void *args, int rc)
2301 {
2302         struct osc_brw_async_args *aa = args;
2303         struct osc_extent *ext;
2304         struct osc_extent *tmp;
2305         struct client_obd *cli = aa->aa_cli;
2306         unsigned long transferred = 0;
2307
2308         ENTRY;
2309
2310         rc = osc_brw_fini_request(req, rc);
2311         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2312
2313         /* restore clear text pages */
2314         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2315
2316         /*
2317          * When server returns -EINPROGRESS, client should always retry
2318          * regardless of the number of times the bulk was resent already.
2319          */
2320         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2321                 if (req->rq_import_generation !=
2322                     req->rq_import->imp_generation) {
2323                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2324                                ""DOSTID", rc = %d.\n",
2325                                req->rq_import->imp_obd->obd_name,
2326                                POSTID(&aa->aa_oa->o_oi), rc);
2327                 } else if (rc == -EINPROGRESS ||
2328                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2329                         rc = osc_brw_redo_request(req, aa, rc);
2330                 } else {
2331                         CERROR("%s: too many resent retries for object: "
2332                                "%llu:%llu, rc = %d.\n",
2333                                req->rq_import->imp_obd->obd_name,
2334                                POSTID(&aa->aa_oa->o_oi), rc);
2335                 }
2336
2337                 if (rc == 0)
2338                         RETURN(0);
2339                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2340                         rc = -EIO;
2341         }
2342
2343         if (rc == 0) {
2344                 struct obdo *oa = aa->aa_oa;
2345                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2346                 unsigned long valid = 0;
2347                 struct cl_object *obj;
2348                 struct osc_async_page *last;
2349
2350                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2351                 obj = osc2cl(last->oap_obj);
2352
2353                 cl_object_attr_lock(obj);
2354                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2355                         attr->cat_blocks = oa->o_blocks;
2356                         valid |= CAT_BLOCKS;
2357                 }
2358                 if (oa->o_valid & OBD_MD_FLMTIME) {
2359                         attr->cat_mtime = oa->o_mtime;
2360                         valid |= CAT_MTIME;
2361                 }
2362                 if (oa->o_valid & OBD_MD_FLATIME) {
2363                         attr->cat_atime = oa->o_atime;
2364                         valid |= CAT_ATIME;
2365                 }
2366                 if (oa->o_valid & OBD_MD_FLCTIME) {
2367                         attr->cat_ctime = oa->o_ctime;
2368                         valid |= CAT_CTIME;
2369                 }
2370
2371                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2372                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2373                         loff_t last_off = last->oap_count + last->oap_obj_off +
2374                                 last->oap_page_off;
2375
2376                         /* Change the file size if this is an out-of-quota or
2377                          * direct IO write and it extends the file size */
2378                         if (loi->loi_lvb.lvb_size < last_off) {
2379                                 attr->cat_size = last_off;
2380                                 valid |= CAT_SIZE;
2381                         }
2382                         /* Extend KMS if it's not a lockless write */
2383                         if (loi->loi_kms < last_off &&
2384                             oap2osc_page(last)->ops_srvlock == 0) {
2385                                 attr->cat_kms = last_off;
2386                                 valid |= CAT_KMS;
2387                         }
2388                 }
2389
2390                 if (valid != 0)
2391                         cl_object_attr_update(env, obj, attr, valid);
2392                 cl_object_attr_unlock(obj);
2393         }
2394         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2395         aa->aa_oa = NULL;
2396
2397         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2398                 osc_inc_unstable_pages(req);
2399
2400         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2401                 list_del_init(&ext->oe_link);
2402                 osc_extent_finish(env, ext, 1,
2403                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2404         }
2405         LASSERT(list_empty(&aa->aa_exts));
2406         LASSERT(list_empty(&aa->aa_oaps));
2407
2408         transferred = (req->rq_bulk == NULL ? /* short io */
2409                        aa->aa_requested_nob :
2410                        req->rq_bulk->bd_nob_transferred);
2411
2412         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2413         ptlrpc_lprocfs_brw(req, transferred);
2414
2415         spin_lock(&cli->cl_loi_list_lock);
2416         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2417          * is called so we know whether to go to sync BRWs or wait for more
2418          * RPCs to complete */
2419         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2420                 cli->cl_w_in_flight--;
2421         else
2422                 cli->cl_r_in_flight--;
2423         osc_wake_cache_waiters(cli);
2424         spin_unlock(&cli->cl_loi_list_lock);
2425
2426         osc_io_unplug(env, cli, NULL);
2427         RETURN(rc);
2428 }
2429
2430 static void brw_commit(struct ptlrpc_request *req)
2431 {
2432         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2433          * this function being called via the rq_commit_cb, we need to
2434          * ensure osc_dec_unstable_pages is still called. Otherwise unstable
2435          * pages may be leaked. */
2436         spin_lock(&req->rq_lock);
2437         if (likely(req->rq_unstable)) {
2438                 req->rq_unstable = 0;
2439                 spin_unlock(&req->rq_lock);
2440
2441                 osc_dec_unstable_pages(req);
2442         } else {
2443                 req->rq_committed = 1;
2444                 spin_unlock(&req->rq_lock);
2445         }
2446 }
2447
2448 /**
2449  * Build an RPC from the list of extents @ext_list. The caller must ensure
2450  * that the total number of pages in this list does not exceed the maximum
2451  * pages per RPC. Extents in the list must be in OES_RPC state.
2452  */
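/* A minimal caller sketch (illustrative only; the extent machinery in
 * osc_cache.c is the real caller):
 *
 *	LIST_HEAD(rpc_list);
 *	list_move_tail(&ext->oe_link, &rpc_list);	// ext must be in OES_RPC
 *	rc = osc_build_rpc(env, cli, &rpc_list, OBD_BRW_WRITE);
 */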
2453 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2454                   struct list_head *ext_list, int cmd)
2455 {
2456         struct ptlrpc_request           *req = NULL;
2457         struct osc_extent               *ext;
2458         struct brw_page                 **pga = NULL;
2459         struct osc_brw_async_args       *aa = NULL;
2460         struct obdo                     *oa = NULL;
2461         struct osc_async_page           *oap;
2462         struct osc_object               *obj = NULL;
2463         struct cl_req_attr              *crattr = NULL;
2464         loff_t                          starting_offset = OBD_OBJECT_EOF;
2465         loff_t                          ending_offset = 0;
2466         /* '1' for consistency with code that checks !mpflag to restore */
2467         int mpflag = 1;
2468         int                             mem_tight = 0;
2469         int                             page_count = 0;
2470         bool                            soft_sync = false;
2471         bool                            ndelay = false;
2472         int                             i;
2473         int                             grant = 0;
2474         int                             rc;
2475         __u32                           layout_version = 0;
2476         LIST_HEAD(rpc_list);
2477         struct ost_body                 *body;
2478         ENTRY;
2479         LASSERT(!list_empty(ext_list));
2480
2481         /* add pages into rpc_list to build BRW rpc */
2482         list_for_each_entry(ext, ext_list, oe_link) {
2483                 LASSERT(ext->oe_state == OES_RPC);
2484                 mem_tight |= ext->oe_memalloc;
2485                 grant += ext->oe_grants;
2486                 page_count += ext->oe_nr_pages;
2487                 layout_version = max(layout_version, ext->oe_layout_version);
2488                 if (obj == NULL)
2489                         obj = ext->oe_obj;
2490         }
2491
2492         soft_sync = osc_over_unstable_soft_limit(cli);
2493         if (mem_tight)
2494                 mpflag = memalloc_noreclaim_save();
2495
2496         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2497         if (pga == NULL)
2498                 GOTO(out, rc = -ENOMEM);
2499
2500         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2501         if (oa == NULL)
2502                 GOTO(out, rc = -ENOMEM);
2503
2504         i = 0;
2505         list_for_each_entry(ext, ext_list, oe_link) {
2506                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2507                         if (mem_tight)
2508                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2509                         if (soft_sync)
2510                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2511                         pga[i] = &oap->oap_brw_page;
2512                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2513                         i++;
2514
2515                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2516                         if (starting_offset == OBD_OBJECT_EOF ||
2517                             starting_offset > oap->oap_obj_off)
2518                                 starting_offset = oap->oap_obj_off;
2519                         else
2520                                 LASSERT(oap->oap_page_off == 0);
2521                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2522                                 ending_offset = oap->oap_obj_off +
2523                                                 oap->oap_count;
2524                         else
2525                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2526                                         PAGE_SIZE);
2527                 }
2528                 if (ext->oe_ndelay)
2529                         ndelay = true;
2530         }
2531
2532         /* first page in the list */
2533         oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2534
2535         crattr = &osc_env_info(env)->oti_req_attr;
2536         memset(crattr, 0, sizeof(*crattr));
2537         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2538         crattr->cra_flags = ~0ULL;
2539         crattr->cra_page = oap2cl_page(oap);
2540         crattr->cra_oa = oa;
2541         cl_req_attr_set(env, osc2cl(obj), crattr);
2542
2543         if (cmd == OBD_BRW_WRITE) {
2544                 oa->o_grant_used = grant;
2545                 if (layout_version > 0) {
2546                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2547                                PFID(&oa->o_oi.oi_fid), layout_version);
2548
2549                         oa->o_layout_version = layout_version;
2550                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2551                 }
2552         }
2553
2554         sort_brw_pages(pga, page_count);
2555         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2556         if (rc != 0) {
2557                 CERROR("prep_req failed: %d\n", rc);
2558                 GOTO(out, rc);
2559         }
2560
2561         req->rq_commit_cb = brw_commit;
2562         req->rq_interpret_reply = brw_interpret;
2563         req->rq_memalloc = mem_tight != 0;
2564         oap->oap_request = ptlrpc_request_addref(req);
2565         if (ndelay) {
2566                 req->rq_no_resend = req->rq_no_delay = 1;
2567                 /* We should probably set a shorter timeout value here, to
2568                  * handle ETIMEDOUT in brw_interpret() correctly. */
2569                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2570         }
2571
2572         /* Need to update the timestamps after the request is built in case
2573          * we race with setattr (locally or in queue at the OST).  If the OST
2574          * gets the later setattr before the earlier BRW (as determined by the
2575          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2576          * is no obvious way to do this in a single call.  bug 10150 */
2577         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2578         crattr->cra_oa = &body->oa;
2579         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2580         cl_req_attr_set(env, osc2cl(obj), crattr);
2581         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2582
2583         aa = ptlrpc_req_async_args(aa, req);
2584         INIT_LIST_HEAD(&aa->aa_oaps);
2585         list_splice_init(&rpc_list, &aa->aa_oaps);
2586         INIT_LIST_HEAD(&aa->aa_exts);
2587         list_splice_init(ext_list, &aa->aa_exts);
2588
2589         spin_lock(&cli->cl_loi_list_lock);
2590         starting_offset >>= PAGE_SHIFT;
2591         if (cmd == OBD_BRW_READ) {
2592                 cli->cl_r_in_flight++;
2593                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2594                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2595                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2596                                       starting_offset + 1);
2597         } else {
2598                 cli->cl_w_in_flight++;
2599                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2600                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2601                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2602                                       starting_offset + 1);
2603         }
2604         spin_unlock(&cli->cl_loi_list_lock);
2605
2606         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2607                   page_count, aa, cli->cl_r_in_flight,
2608                   cli->cl_w_in_flight);
2609         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2610
2611         ptlrpcd_add_req(req);
2612         rc = 0;
2613         EXIT;
2614
2615 out:
2616         if (mem_tight)
2617                 memalloc_noreclaim_restore(mpflag);
2618
2619         if (rc != 0) {
2620                 LASSERT(req == NULL);
2621
2622                 if (oa)
2623                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2624                 if (pga) {
2625                         osc_release_bounce_pages(pga, page_count);
2626                         osc_release_ppga(pga, page_count);
2627                 }
2628                 /* This should happen rarely and is pretty bad; it makes the
2629                  * pending list not follow the dirty order. */
2630                 while (!list_empty(ext_list)) {
2631                         ext = list_entry(ext_list->next, struct osc_extent,
2632                                          oe_link);
2633                         list_del_init(&ext->oe_link);
2634                         osc_extent_finish(env, ext, 0, rc);
2635                 }
2636         }
2637         RETURN(rc);
2638 }
2639
2640 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2641 {
2642         int set = 0;
2643
2644         LASSERT(lock != NULL);
2645
2646         lock_res_and_lock(lock);
2647
2648         if (lock->l_ast_data == NULL)
2649                 lock->l_ast_data = data;
2650         if (lock->l_ast_data == data)
2651                 set = 1;
2652
2653         unlock_res_and_lock(lock);
2654
2655         return set;
2656 }
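
/*
 * Usage sketch (illustrative, mirroring the callers further down): attach an
 * osc_object to a matched lock, or drop the match when the lock is already
 * bound to a different object.
 *
 *	if (osc_set_lock_data(matched, obj)) {
 *		*flags |= LDLM_FL_LVB_READY;	(reuse the matched lock)
 *	} else {
 *		ldlm_lock_decref(&lockh, mode);	(lock owned elsewhere)
 *	}
 */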
2657
2658 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2659                      void *cookie, struct lustre_handle *lockh,
2660                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2661                      int errcode)
2662 {
2663         bool intent = *flags & LDLM_FL_HAS_INTENT;
2664         int rc;
2665         ENTRY;
2666
2667         /* The request was created before the ldlm_cli_enqueue() call. */
2668         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2669                 struct ldlm_reply *rep;
2670
2671                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2672                 LASSERT(rep != NULL);
2673
2674                 rep->lock_policy_res1 =
2675                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2676                 if (rep->lock_policy_res1)
2677                         errcode = rep->lock_policy_res1;
2678                 if (!speculative)
2679                         *flags |= LDLM_FL_LVB_READY;
2680         } else if (errcode == ELDLM_OK) {
2681                 *flags |= LDLM_FL_LVB_READY;
2682         }
2683
2684         /* Call the update callback. */
2685         rc = (*upcall)(cookie, lockh, errcode);
2686
2687         /* release the reference taken in ldlm_cli_enqueue() */
2688         if (errcode == ELDLM_LOCK_MATCHED)
2689                 errcode = ELDLM_OK;
2690         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2691                 ldlm_lock_decref(lockh, mode);
2692
2693         RETURN(rc);
2694 }
2695
2696 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2697                           void *args, int rc)
2698 {
2699         struct osc_enqueue_args *aa = args;
2700         struct ldlm_lock *lock;
2701         struct lustre_handle *lockh = &aa->oa_lockh;
2702         enum ldlm_mode mode = aa->oa_mode;
2703         struct ost_lvb *lvb = aa->oa_lvb;
2704         __u32 lvb_len = sizeof(*lvb);
2705         __u64 flags = 0;
2706         struct ldlm_enqueue_info einfo = {
2707                 .ei_type = aa->oa_type,
2708                 .ei_mode = mode,
2709         };
2710
2711         ENTRY;
2712
2713         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2714          * be valid. */
2715         lock = ldlm_handle2lock(lockh);
2716         LASSERTF(lock != NULL,
2717                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2718                  lockh->cookie, req, aa);
2719
2720         /* Take an additional reference so that a blocking AST that
2721          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2722          * to arrive after an upcall has been executed by
2723          * osc_enqueue_fini(). */
2724         ldlm_lock_addref(lockh, mode);
2725
2726         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2727         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2728
2729         /* Let the CP AST grant the lock first. */
2730         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2731
2732         if (aa->oa_speculative) {
2733                 LASSERT(aa->oa_lvb == NULL);
2734                 LASSERT(aa->oa_flags == NULL);
2735                 aa->oa_flags = &flags;
2736         }
2737
2738         /* Complete the lock-acquisition procedure. */
2739         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2740                                    lvb, lvb_len, lockh, rc);
2741         /* Complete the OSC-side processing. */
2742         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2743                               aa->oa_flags, aa->oa_speculative, rc);
2744
2745         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2746
2747         ldlm_lock_decref(lockh, mode);
2748         LDLM_LOCK_PUT(lock);
2749         RETURN(rc);
2750 }
2751
2752 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
2753  * lock from the 2nd OSC before a lock from the 1st one. This does not
2754  * deadlock with other synchronous requests; however, holding some locks while
2755  * trying to obtain others may take a considerable amount of time in case of
2756  * OST failure, and when other sync requests cannot get a lock released from a
2757  * client, the client is evicted from the cluster -- such scenarios make life
2758  * difficult, so release locks just after they are obtained. */
2759 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2760                      __u64 *flags, union ldlm_policy_data *policy,
2761                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2762                      void *cookie, struct ldlm_enqueue_info *einfo,
2763                      struct ptlrpc_request_set *rqset, int async,
2764                      bool speculative)
2765 {
2766         struct obd_device *obd = exp->exp_obd;
2767         struct lustre_handle lockh = { 0 };
2768         struct ptlrpc_request *req = NULL;
2769         int intent = *flags & LDLM_FL_HAS_INTENT;
2770         __u64 match_flags = *flags;
2771         enum ldlm_mode mode;
2772         int rc;
2773         ENTRY;
2774
2775         /* Filesystem lock extents are extended to page boundaries so that
2776          * dealing with the page cache is a little smoother.  */
2777         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2778         policy->l_extent.end |= ~PAGE_MASK;
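        /*
         * Worked example (illustrative, assuming 4KiB pages, so
         * PAGE_MASK == ~0xfff):
         *
         *	start = 5000: 5000 - (5000 & 0xfff) = 5000 - 904 = 4096
         *	end   = 9000: 9000 | 0xfff = 12287
         *
         * i.e. the byte range [5000, 9000] widens to the page-aligned
         * extent [4096, 12287] before matching or enqueueing.
         */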
2779
2780         /* Next, search for already existing extent locks that will cover us */
2781         /* If we're trying to read, we also search for an existing PW lock.  The
2782          * VFS and page cache already protect us locally, so lots of readers/
2783          * writers can share a single PW lock.
2784          *
2785          * There are problems with conversion deadlocks, so instead of
2786          * converting a read lock to a write lock, we'll just enqueue a new
2787          * one.
2788          *
2789          * At some point we should cancel the read lock instead of making them
2790          * send us a blocking callback, but there are problems with canceling
2791          * locks out from other users right now, too. */
2792         mode = einfo->ei_mode;
2793         if (einfo->ei_mode == LCK_PR)
2794                 mode |= LCK_PW;
2795         /* Normal lock requests must wait for the LVB to be ready before
2796          * matching a lock; speculative lock requests do not need to,
2797          * because they will not actually use the lock. */
2798         if (!speculative)
2799                 match_flags |= LDLM_FL_LVB_READY;
2800         if (intent != 0)
2801                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2802         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2803                                einfo->ei_type, policy, mode, &lockh);
2804         if (mode) {
2805                 struct ldlm_lock *matched;
2806
2807                 if (*flags & LDLM_FL_TEST_LOCK)
2808                         RETURN(ELDLM_OK);
2809
2810                 matched = ldlm_handle2lock(&lockh);
2811                 if (speculative) {
2812                         /* This DLM lock request is speculative, and does not
2813                          * have an associated IO request. Therefore, if there
2814                          * is already a DLM lock, it will just inform the
2815                          * caller to cancel the request for this stripe. */
2816                         lock_res_and_lock(matched);
2817                         if (ldlm_extent_equal(&policy->l_extent,
2818                             &matched->l_policy_data.l_extent))
2819                                 rc = -EEXIST;
2820                         else
2821                                 rc = -ECANCELED;
2822                         unlock_res_and_lock(matched);
2823
2824                         ldlm_lock_decref(&lockh, mode);
2825                         LDLM_LOCK_PUT(matched);
2826                         RETURN(rc);
2827                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2828                         *flags |= LDLM_FL_LVB_READY;
2829
2830                         /* We already have a lock, and it's referenced. */
2831                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2832
2833                         ldlm_lock_decref(&lockh, mode);
2834                         LDLM_LOCK_PUT(matched);
2835                         RETURN(ELDLM_OK);
2836                 } else {
2837                         ldlm_lock_decref(&lockh, mode);
2838                         LDLM_LOCK_PUT(matched);
2839                 }
2840         }
2841
2842         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2843                 RETURN(-ENOLCK);
2844
2845         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2846         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2847
2848         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2849                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2850         if (async) {
2851                 if (!rc) {
2852                         struct osc_enqueue_args *aa;
2853                         aa = ptlrpc_req_async_args(aa, req);
2854                         aa->oa_exp         = exp;
2855                         aa->oa_mode        = einfo->ei_mode;
2856                         aa->oa_type        = einfo->ei_type;
2857                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2858                         aa->oa_upcall      = upcall;
2859                         aa->oa_cookie      = cookie;
2860                         aa->oa_speculative = speculative;
2861                         if (!speculative) {
2862                                 aa->oa_flags  = flags;
2863                                 aa->oa_lvb    = lvb;
2864                         } else {
2865                                 /* Speculative locks essentially enqueue
2866                                  * a DLM lock in advance, so we don't care
2867                                  * about the result of the enqueue. */
2868                                 aa->oa_lvb    = NULL;
2869                                 aa->oa_flags  = NULL;
2870                         }
2871
2872                         req->rq_interpret_reply = osc_enqueue_interpret;
2873                         ptlrpc_set_add_req(rqset, req);
2874                 }
2875                 RETURN(rc);
2876         }
2877
2878         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2879                               flags, speculative, rc);
2880
2881         RETURN(rc);
2882 }
2883
2884 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2885                    struct ldlm_res_id *res_id, enum ldlm_type type,
2886                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2887                    __u64 *flags, struct osc_object *obj,
2888                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2889 {
2890         struct obd_device *obd = exp->exp_obd;
2891         __u64 lflags = *flags;
2892         enum ldlm_mode rc;
2893         ENTRY;
2894
2895         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2896                 RETURN(-EIO);
2897
2898         /* Filesystem lock extents are extended to page boundaries so that
2899          * dealing with the page cache is a little smoother */
2900         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2901         policy->l_extent.end |= ~PAGE_MASK;
2902
2903         /* Next, search for already existing extent locks that will cover us */
2904         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2905                                         res_id, type, policy, mode, lockh,
2906                                         match_flags);
2907         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2908                 RETURN(rc);
2909
2910         if (obj != NULL) {
2911                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2912
2913                 LASSERT(lock != NULL);
2914                 if (osc_set_lock_data(lock, obj)) {
2915                         lock_res_and_lock(lock);
2916                         if (!ldlm_is_lvb_cached(lock)) {
2917                                 LASSERT(lock->l_ast_data == obj);
2918                                 osc_lock_lvb_update(env, obj, lock, NULL);
2919                                 ldlm_set_lvb_cached(lock);
2920                         }
2921                         unlock_res_and_lock(lock);
2922                 } else {
2923                         ldlm_lock_decref(lockh, rc);
2924                         rc = 0;
2925                 }
2926                 LDLM_LOCK_PUT(lock);
2927         }
2928         RETURN(rc);
2929 }
2930
2931 static int osc_statfs_interpret(const struct lu_env *env,
2932                                 struct ptlrpc_request *req, void *args, int rc)
2933 {
2934         struct osc_async_args *aa = args;
2935         struct obd_statfs *msfs;
2936
2937         ENTRY;
2938         if (rc == -EBADR)
2939                 /*
2940                  * The request has in fact never been sent due to issues at
2941                  * a higher level (LOV).  Exit immediately since the caller
2942                  * is aware of the problem and takes care of the clean up.
2943                  */
2944                 RETURN(rc);
2945
2946         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2947             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2948                 GOTO(out, rc = 0);
2949
2950         if (rc != 0)
2951                 GOTO(out, rc);
2952
2953         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2954         if (msfs == NULL)
2955                 GOTO(out, rc = -EPROTO);
2956
2957         *aa->aa_oi->oi_osfs = *msfs;
2958 out:
2959         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2960
2961         RETURN(rc);
2962 }
2963
2964 static int osc_statfs_async(struct obd_export *exp,
2965                             struct obd_info *oinfo, time64_t max_age,
2966                             struct ptlrpc_request_set *rqset)
2967 {
2968         struct obd_device     *obd = class_exp2obd(exp);
2969         struct ptlrpc_request *req;
2970         struct osc_async_args *aa;
2971         int rc;
2972         ENTRY;
2973
2974         if (obd->obd_osfs_age >= max_age) {
2975                 CDEBUG(D_SUPER,
2976                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
2977                        obd->obd_name, &obd->obd_osfs,
2978                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
2979                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
2980                 spin_lock(&obd->obd_osfs_lock);
2981                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
2982                 spin_unlock(&obd->obd_osfs_lock);
2983                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
2984                 if (oinfo->oi_cb_up)
2985                         oinfo->oi_cb_up(oinfo, 0);
2986
2987                 RETURN(0);
2988         }
2989
2990         /* We could possibly pass max_age in the request (as an absolute
2991          * timestamp or a "seconds.usec ago") so the target can avoid doing
2992          * extra calls into the filesystem if that isn't necessary (e.g.
2993          * during mount that would help a bit).  Having relative timestamps
2994          * is not so great if request processing is slow, while absolute
2995          * timestamps are not ideal because they need time synchronization. */
2996         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2997         if (req == NULL)
2998                 RETURN(-ENOMEM);
2999
3000         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3001         if (rc) {
3002                 ptlrpc_request_free(req);
3003                 RETURN(rc);
3004         }
3005         ptlrpc_request_set_replen(req);
3006         req->rq_request_portal = OST_CREATE_PORTAL;
3007         ptlrpc_at_set_req_timeout(req);
3008
3009         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3010                 /* procfs requests should not wait on statfs, to avoid deadlock */
3011                 req->rq_no_resend = 1;
3012                 req->rq_no_delay = 1;
3013         }
3014
3015         req->rq_interpret_reply = osc_statfs_interpret;
3016         aa = ptlrpc_req_async_args(aa, req);
3017         aa->aa_oi = oinfo;
3018
3019         ptlrpc_set_add_req(rqset, req);
3020         RETURN(0);
3021 }
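
/*
 * Caller sketch (illustrative; my_osfs and my_statfs_done are hypothetical):
 * accept cached obd_osfs data gathered within the last second, otherwise fire
 * an async OST_STATFS whose interpret callback will invoke oi_cb_up:
 *
 *	oinfo->oi_osfs = &my_osfs;
 *	oinfo->oi_cb_up = my_statfs_done;
 *	rc = osc_statfs_async(exp, oinfo, ktime_get_seconds() - 1, rqset);
 */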
3022
3023 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3024                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3025 {
3026         struct obd_device     *obd = class_exp2obd(exp);
3027         struct obd_statfs     *msfs;
3028         struct ptlrpc_request *req;
3029         struct obd_import     *imp, *imp0;
3030         int rc;
3031         ENTRY;
3032
3033         /* Since the request might also come from lprocfs, we need to
3034          * sync this with client_disconnect_export().  Bug 15684.
3035          */
3036         with_imp_locked(obd, imp0, rc)
3037                 imp = class_import_get(imp0);
3038         if (rc)
3039                 RETURN(rc);
3040
3041         /* We could possibly pass max_age in the request (as an absolute
3042          * timestamp or a "seconds.usec ago") so the target can avoid doing
3043          * extra calls into the filesystem if that isn't necessary (e.g.
3044          * during mount that would help a bit).  Having relative timestamps
3045          * is not so great if request processing is slow, while absolute
3046          * timestamps are not ideal because they need time synchronization. */
3047         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3048
3049         class_import_put(imp);
3050
3051         if (req == NULL)
3052                 RETURN(-ENOMEM);
3053
3054         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3055         if (rc) {
3056                 ptlrpc_request_free(req);
3057                 RETURN(rc);
3058         }
3059         ptlrpc_request_set_replen(req);
3060         req->rq_request_portal = OST_CREATE_PORTAL;
3061         ptlrpc_at_set_req_timeout(req);
3062
3063         if (flags & OBD_STATFS_NODELAY) {
3064                 /* procfs requests should not wait on statfs, to avoid deadlock */
3065                 req->rq_no_resend = 1;
3066                 req->rq_no_delay = 1;
3067         }
3068
3069         rc = ptlrpc_queue_wait(req);
3070         if (rc)
3071                 GOTO(out, rc);
3072
3073         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3074         if (msfs == NULL)
3075                 GOTO(out, rc = -EPROTO);
3076
3077         *osfs = *msfs;
3078
3079         EXIT;
3080 out:
3081         ptlrpc_req_finished(req);
3082         return rc;
3083 }
3084
3085 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3086                          void *karg, void __user *uarg)
3087 {
3088         struct obd_device *obd = exp->exp_obd;
3089         struct obd_ioctl_data *data = karg;
3090         int rc = 0;
3091
3092         ENTRY;
3093         if (!try_module_get(THIS_MODULE)) {
3094                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3095                        module_name(THIS_MODULE));
3096                 return -EINVAL;
3097         }
3098         switch (cmd) {
3099         case OBD_IOC_CLIENT_RECOVER:
3100                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3101                                            data->ioc_inlbuf1, 0);
3102                 if (rc > 0)
3103                         rc = 0;
3104                 break;
3105         case IOC_OSC_SET_ACTIVE:
3106                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3107                                               data->ioc_offset);
3108                 break;
3109         default:
3110                 rc = -ENOTTY;
3111                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3112                        obd->obd_name, cmd, current->comm, rc);
3113                 break;
3114         }
3115
3116         module_put(THIS_MODULE);
3117         return rc;
3118 }
3119
3120 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3121                        u32 keylen, void *key, u32 vallen, void *val,
3122                        struct ptlrpc_request_set *set)
3123 {
3124         struct ptlrpc_request *req;
3125         struct obd_device     *obd = exp->exp_obd;
3126         struct obd_import     *imp = class_exp2cliimp(exp);
3127         char                  *tmp;
3128         int                    rc;
3129         ENTRY;
3130
3131         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3132
3133         if (KEY_IS(KEY_CHECKSUM)) {
3134                 if (vallen != sizeof(int))
3135                         RETURN(-EINVAL);
3136                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3137                 RETURN(0);
3138         }
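
        /*
         * Caller sketch (illustrative) for the KEY_CHECKSUM case above,
         * going through the generic obd_set_info_async() entry point:
         *
         *	int on = 1;
         *
         *	rc = obd_set_info_async(env, exp, sizeof(KEY_CHECKSUM),
         *				KEY_CHECKSUM, sizeof(on), &on, NULL);
         */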
3139
3140         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3141                 sptlrpc_conf_client_adapt(obd);
3142                 RETURN(0);
3143         }
3144
3145         if (KEY_IS(KEY_FLUSH_CTX)) {
3146                 sptlrpc_import_flush_my_ctx(imp);
3147                 RETURN(0);
3148         }
3149
3150         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3151                 struct client_obd *cli = &obd->u.cli;
3152                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3153                 long target = *(long *)val;
3154
3155                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3156                 *(long *)val -= nr;
3157                 RETURN(0);
3158         }
3159
3160         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3161                 RETURN(-EINVAL);
3162
3163         /* We pass all other commands directly to OST.  Since nobody calls osc
3164          * methods directly and everybody is supposed to go through LOV, we
3165          * assume lov checked invalid values for us.
3166          * The only recognised values so far are evict_by_nid and mds_conn.
3167          * Even if something bad goes through, we'd get a -EINVAL from OST
3168          * anyway. */
3169
3170         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3171                                                 &RQF_OST_SET_GRANT_INFO :
3172                                                 &RQF_OBD_SET_INFO);
3173         if (req == NULL)
3174                 RETURN(-ENOMEM);
3175
3176         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3177                              RCL_CLIENT, keylen);
3178         if (!KEY_IS(KEY_GRANT_SHRINK))
3179                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3180                                      RCL_CLIENT, vallen);
3181         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3182         if (rc) {
3183                 ptlrpc_request_free(req);
3184                 RETURN(rc);
3185         }
3186
3187         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3188         memcpy(tmp, key, keylen);
3189         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3190                                                         &RMF_OST_BODY :
3191                                                         &RMF_SETINFO_VAL);
3192         memcpy(tmp, val, vallen);
3193
3194         if (KEY_IS(KEY_GRANT_SHRINK)) {
3195                 struct osc_grant_args *aa;
3196                 struct obdo *oa;
3197
3198                 aa = ptlrpc_req_async_args(aa, req);
3199                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3200                 if (!oa) {
3201                         ptlrpc_req_finished(req);
3202                         RETURN(-ENOMEM);
3203                 }
3204                 *oa = ((struct ost_body *)val)->oa;
3205                 aa->aa_oa = oa;
3206                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3207         }
3208
3209         ptlrpc_request_set_replen(req);
3210         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3211                 LASSERT(set != NULL);
3212                 ptlrpc_set_add_req(set, req);
3213                 ptlrpc_check_set(NULL, set);
3214         } else {
3215                 ptlrpcd_add_req(req);
3216         }
3217
3218         RETURN(0);
3219 }
3220 EXPORT_SYMBOL(osc_set_info_async);
3221
3222 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3223                   struct obd_device *obd, struct obd_uuid *cluuid,
3224                   struct obd_connect_data *data, void *localdata)
3225 {
3226         struct client_obd *cli = &obd->u.cli;
3227
3228         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3229                 long lost_grant;
3230                 long grant;
3231
3232                 spin_lock(&cli->cl_loi_list_lock);
3233                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3234                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3235                         /* restore ocd_grant_blkbits as client page bits */
3236                         data->ocd_grant_blkbits = PAGE_SHIFT;
3237                         grant += cli->cl_dirty_grant;
3238                 } else {
3239                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3240                 }
3241                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
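                /*
                 * Illustrative numbers (assuming PAGE_SHIFT == 12 and no
                 * OBD_CONNECT_GRANT_PARAM): with cl_avail_grant = 1MiB,
                 * cl_reserved_grant = 0 and cl_dirty_pages = 16, we ask the
                 * server to honour grant = 1MiB + (16 << 12) = 1MiB + 64KiB
                 * across the reconnect.  If the sum is zero, the GNU "?:"
                 * fallback above requests twice the BRW size instead.
                 */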
3242                 lost_grant = cli->cl_lost_grant;
3243                 cli->cl_lost_grant = 0;
3244                 spin_unlock(&cli->cl_loi_list_lock);
3245
3246                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3247                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3248                        data->ocd_version, data->ocd_grant, lost_grant);
3249         }
3250
3251         RETURN(0);
3252 }
3253 EXPORT_SYMBOL(osc_reconnect);
3254
3255 int osc_disconnect(struct obd_export *exp)
3256 {
3257         struct obd_device *obd = class_exp2obd(exp);
3258         int rc;
3259
3260         rc = client_disconnect_export(exp);
3261         /**
3262          * Initially we put del_shrink_grant before disconnect_export, but it
3263          * causes the following problem if setup (connect) and cleanup
3264          * (disconnect) are tangled together.
3265          *      connect p1                     disconnect p2
3266          *   ptlrpc_connect_import
3267          *     ...............               class_manual_cleanup
3268          *                                     osc_disconnect
3269          *                                     del_shrink_grant
3270          *   ptlrpc_connect_interpret
3271          *     osc_init_grant
3272          *   add this client to shrink list
3273          *                                      cleanup_osc
3274          * Bang! The grant shrink thread triggers the shrink. BUG18662
3275          */
3276         osc_del_grant_list(&obd->u.cli);
3277         return rc;
3278 }
3279 EXPORT_SYMBOL(osc_disconnect);
3280
3281 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3282                                  struct hlist_node *hnode, void *arg)
3283 {
3284         struct lu_env *env = arg;
3285         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3286         struct ldlm_lock *lock;
3287         struct osc_object *osc = NULL;
3288         ENTRY;
3289
3290         lock_res(res);
3291         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3292                 if (lock->l_ast_data != NULL && osc == NULL) {
3293                         osc = lock->l_ast_data;
3294                         cl_object_get(osc2cl(osc));
3295                 }
3296
3297                 /* Clear the LDLM_FL_CLEANED flag to make sure the lock will
3298                  * be canceled by the 2nd ldlm_namespace_cleanup() call in
3299                  * osc_import_event(). */
3300                 ldlm_clear_cleaned(lock);
3301         }
3302         unlock_res(res);
3303
3304         if (osc != NULL) {
3305                 osc_object_invalidate(env, osc);
3306                 cl_object_put(env, osc2cl(osc));
3307         }
3308
3309         RETURN(0);
3310 }
3311 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
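
/*
 * The helper above is driven over every resource in the namespace hash; the
 * hookup (see the IMP_EVENT_INVALIDATE case below) is:
 *
 *	cfs_hash_for_each_nolock(ns->ns_rs_hash,
 *				 osc_ldlm_resource_invalidate, env, 0);
 */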
3312
3313 static int osc_import_event(struct obd_device *obd,
3314                             struct obd_import *imp,
3315                             enum obd_import_event event)
3316 {
3317         struct client_obd *cli;
3318         int rc = 0;
3319
3320         ENTRY;
3321         LASSERT(imp->imp_obd == obd);
3322
3323         switch (event) {
3324         case IMP_EVENT_DISCON: {
3325                 cli = &obd->u.cli;
3326                 spin_lock(&cli->cl_loi_list_lock);
3327                 cli->cl_avail_grant = 0;
3328                 cli->cl_lost_grant = 0;
3329                 spin_unlock(&cli->cl_loi_list_lock);
3330                 break;
3331         }
3332         case IMP_EVENT_INACTIVE: {
3333                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3334                 break;
3335         }
3336         case IMP_EVENT_INVALIDATE: {
3337                 struct ldlm_namespace *ns = obd->obd_namespace;
3338                 struct lu_env         *env;
3339                 __u16                  refcheck;
3340
3341                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3342
3343                 env = cl_env_get(&refcheck);
3344                 if (!IS_ERR(env)) {
3345                         osc_io_unplug(env, &obd->u.cli, NULL);
3346
3347                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3348                                                  osc_ldlm_resource_invalidate,
3349                                                  env, 0);
3350                         cl_env_put(env, &refcheck);
3351
3352                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3353                 } else
3354                         rc = PTR_ERR(env);
3355                 break;
3356         }
3357         case IMP_EVENT_ACTIVE: {
3358                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3359                 break;
3360         }
3361         case IMP_EVENT_OCD: {
3362                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3363
3364                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3365                         osc_init_grant(&obd->u.cli, ocd);
3366
3367                 /* See bug 7198 */
3368                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3369                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3370
3371                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3372                 break;
3373         }
3374         case IMP_EVENT_DEACTIVATE: {
3375                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3376                 break;
3377         }
3378         case IMP_EVENT_ACTIVATE: {
3379                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3380                 break;
3381         }
3382         default:
3383                 CERROR("Unknown import event %d\n", event);
3384                 LBUG();
3385         }
3386         RETURN(rc);
3387 }
3388
3389 /**
3390  * Determine whether the lock can be canceled before replaying the lock
3391  * during recovery, see bug16774 for detailed information.
3392  *
3393  * \retval zero the lock can't be canceled
3394  * \retval other ok to cancel
3395  */
3396 static int osc_cancel_weight(struct ldlm_lock *lock)
3397 {
3398         /*
3399          * Cancel all unused and granted extent locks.
3400          */
3401         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3402             ldlm_is_granted(lock) &&
3403             osc_ldlm_weigh_ast(lock) == 0)
3404                 RETURN(1);
3405
3406         RETURN(0);
3407 }
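
/*
 * osc_cancel_weight() is wired up as the namespace cancel-weight callback in
 * osc_setup():
 *
 *	ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
 */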
3408
3409 static int brw_queue_work(const struct lu_env *env, void *data)
3410 {
3411         struct client_obd *cli = data;
3412
3413         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3414
3415         osc_io_unplug(env, cli, NULL);
3416         RETURN(0);
3417 }
3418
3419 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3420 {
3421         struct client_obd *cli = &obd->u.cli;
3422         void *handler;
3423         int rc;
3424
3425         ENTRY;
3426
3427         rc = ptlrpcd_addref();
3428         if (rc)
3429                 RETURN(rc);
3430
3431         rc = client_obd_setup(obd, lcfg);
3432         if (rc)
3433                 GOTO(out_ptlrpcd, rc);
3434
3436         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3437         if (IS_ERR(handler))
3438                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3439         cli->cl_writeback_work = handler;
3440
3441         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3442         if (IS_ERR(handler))
3443                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3444         cli->cl_lru_work = handler;
3445
3446         rc = osc_quota_setup(obd);
3447         if (rc)
3448                 GOTO(out_ptlrpcd_work, rc);
3449
3450         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3451         osc_update_next_shrink(cli);
3452
3453         RETURN(rc);
3454
3455 out_ptlrpcd_work:
3456         if (cli->cl_writeback_work != NULL) {
3457                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3458                 cli->cl_writeback_work = NULL;
3459         }
3460         if (cli->cl_lru_work != NULL) {
3461                 ptlrpcd_destroy_work(cli->cl_lru_work);
3462                 cli->cl_lru_work = NULL;
3463         }
3464         client_obd_cleanup(obd);
3465 out_ptlrpcd:
3466         ptlrpcd_decref();
3467         RETURN(rc);
3468 }
3469 EXPORT_SYMBOL(osc_setup_common);
3470
3471 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3472 {
3473         struct client_obd *cli = &obd->u.cli;
3474         int                adding;
3475         int                added;
3476         int                req_count;
3477         int                rc;
3478
3479         ENTRY;
3480
3481         rc = osc_setup_common(obd, lcfg);
3482         if (rc < 0)
3483                 RETURN(rc);
3484
3485         rc = osc_tunables_init(obd);
3486         if (rc)
3487                 RETURN(rc);
3488
3489         /*
3490          * We try to control the total number of requests with an upper limit,
3491          * osc_reqpool_maxreqcount. There might be some race which will cause
3492          * over-limit allocation, but that is fine.
3493          */
3494         req_count = atomic_read(&osc_pool_req_count);
3495         if (req_count < osc_reqpool_maxreqcount) {
3496                 adding = cli->cl_max_rpcs_in_flight + 2;
3497                 if (req_count + adding > osc_reqpool_maxreqcount)
3498                         adding = osc_reqpool_maxreqcount - req_count;
3499
3500                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3501                 atomic_add(added, &osc_pool_req_count);
3502         }
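        /*
         * Worked example (illustrative): with osc_reqpool_maxreqcount = 100,
         * req_count = 95 already in the pool and cl_max_rpcs_in_flight = 8,
         * adding starts at 8 + 2 = 10 but is capped to 100 - 95 = 5, so at
         * most 5 requests are added here.
         */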
3503
3504         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3505
3506         spin_lock(&osc_shrink_lock);
3507         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3508         spin_unlock(&osc_shrink_lock);
3509         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3510         cli->cl_import->imp_idle_debug = D_HA;
3511
3512         RETURN(0);
3513 }
3514
3515 int osc_precleanup_common(struct obd_device *obd)
3516 {
3517         struct client_obd *cli = &obd->u.cli;
3518         ENTRY;
3519
3520         /* LU-464
3521          * for echo client, export may be on zombie list, wait for
3522          * zombie thread to cull it, because cli.cl_import will be
3523          * cleared in client_disconnect_export():
3524          *   class_export_destroy() -> obd_cleanup() ->
3525          *   echo_device_free() -> echo_client_cleanup() ->
3526          *   obd_disconnect() -> osc_disconnect() ->
3527          *   client_disconnect_export()
3528          */
3529         obd_zombie_barrier();
3530         if (cli->cl_writeback_work) {
3531                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3532                 cli->cl_writeback_work = NULL;
3533         }
3534
3535         if (cli->cl_lru_work) {
3536                 ptlrpcd_destroy_work(cli->cl_lru_work);
3537                 cli->cl_lru_work = NULL;
3538         }
3539
3540         obd_cleanup_client_import(obd);
3541         RETURN(0);
3542 }
3543 EXPORT_SYMBOL(osc_precleanup_common);
3544
3545 static int osc_precleanup(struct obd_device *obd)
3546 {
3547         ENTRY;
3548
3549         osc_precleanup_common(obd);
3550
3551         ptlrpc_lprocfs_unregister_obd(obd);
3552         RETURN(0);
3553 }
3554
3555 int osc_cleanup_common(struct obd_device *obd)
3556 {
3557         struct client_obd *cli = &obd->u.cli;
3558         int rc;
3559
3560         ENTRY;
3561
3562         spin_lock(&osc_shrink_lock);
3563         list_del(&cli->cl_shrink_list);
3564         spin_unlock(&osc_shrink_lock);
3565
3566         /* lru cleanup */
3567         if (cli->cl_cache != NULL) {
3568                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3569                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3570                 list_del_init(&cli->cl_lru_osc);
3571                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3572                 cli->cl_lru_left = NULL;
3573                 cl_cache_decref(cli->cl_cache);
3574                 cli->cl_cache = NULL;
3575         }
3576
3577         /* free memory of osc quota cache */
3578         osc_quota_cleanup(obd);
3579
3580         rc = client_obd_cleanup(obd);
3581
3582         ptlrpcd_decref();
3583         RETURN(rc);
3584 }
3585 EXPORT_SYMBOL(osc_cleanup_common);
3586
3587 static const struct obd_ops osc_obd_ops = {
3588         .o_owner                = THIS_MODULE,
3589         .o_setup                = osc_setup,
3590         .o_precleanup           = osc_precleanup,
3591         .o_cleanup              = osc_cleanup_common,
3592         .o_add_conn             = client_import_add_conn,
3593         .o_del_conn             = client_import_del_conn,
3594         .o_connect              = client_connect_import,
3595         .o_reconnect            = osc_reconnect,
3596         .o_disconnect           = osc_disconnect,
3597         .o_statfs               = osc_statfs,
3598         .o_statfs_async         = osc_statfs_async,
3599         .o_create               = osc_create,
3600         .o_destroy              = osc_destroy,
3601         .o_getattr              = osc_getattr,
3602         .o_setattr              = osc_setattr,
3603         .o_iocontrol            = osc_iocontrol,
3604         .o_set_info_async       = osc_set_info_async,
3605         .o_import_event         = osc_import_event,
3606         .o_quotactl             = osc_quotactl,
3607 };
3608
3609 LIST_HEAD(osc_shrink_list);
3610 DEFINE_SPINLOCK(osc_shrink_lock);
3611
3612 #ifdef HAVE_SHRINKER_COUNT
3613 static struct shrinker osc_cache_shrinker = {
3614         .count_objects  = osc_cache_shrink_count,
3615         .scan_objects   = osc_cache_shrink_scan,
3616         .seeks          = DEFAULT_SEEKS,
3617 };
3618 #else
3619 static int osc_cache_shrink(struct shrinker *shrinker,
3620                             struct shrink_control *sc)
3621 {
3622         (void)osc_cache_shrink_scan(shrinker, sc);
3623
3624         return osc_cache_shrink_count(shrinker, sc);
3625 }
3626
3627 static struct shrinker osc_cache_shrinker = {
3628         .shrink   = osc_cache_shrink,
3629         .seeks    = DEFAULT_SEEKS,
3630 };
3631 #endif
3632
3633 static int __init osc_init(void)
3634 {
3635         unsigned int reqpool_size;
3636         unsigned int reqsize;
3637         int rc;
3638         ENTRY;
3639
3640         /* Print the address of _any_ initialized kernel symbol from this
3641          * module, to allow debugging with a gdb that doesn't support data
3642          * symbols from modules. */
3643         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3644
3645         rc = lu_kmem_init(osc_caches);
3646         if (rc)
3647                 RETURN(rc);
3648
3649         rc = class_register_type(&osc_obd_ops, NULL, true,
3650                                  LUSTRE_OSC_NAME, &osc_device_type);
3651         if (rc)
3652                 GOTO(out_kmem, rc);
3653
3654         rc = register_shrinker(&osc_cache_shrinker);
3655         if (rc)
3656                 GOTO(out_type, rc);
3657
3658         /* This is obviously too much memory; we only prevent overflow here */
3659         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3660                 GOTO(out_shrinker, rc = -EINVAL);
3661
3662         reqpool_size = osc_reqpool_mem_max << 20;
3663
3664         reqsize = 1;
3665         while (reqsize < OST_IO_MAXREQSIZE)
3666                 reqsize = reqsize << 1;
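        /* The loop above rounds reqsize up to the first power of two that is
         * >= OST_IO_MAXREQSIZE.  Illustrative numbers only (the real value is
         * build-dependent): if OST_IO_MAXREQSIZE were ~900KiB, reqsize would
         * become 1MiB, and with the default osc_reqpool_mem_max of 5MB the
         * pool would be capped at 5MiB / 1MiB = 5 requests. */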
3667
3668         /*
3669          * We don't enlarge the request count in the OSC pool according to
3670          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3671          * after a normal allocation has failed, so a small OSC pool won't
3672          * cause much performance degradation in most cases.
3673          */
3674         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3675
3676         atomic_set(&osc_pool_req_count, 0);
3677         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3678                                           ptlrpc_add_rqs_to_pool);
3679
3680         if (osc_rq_pool == NULL)
3681                 GOTO(out_shrinker, rc = -ENOMEM);
3682
3683         rc = osc_start_grant_work();
3684         if (rc != 0)
3685                 GOTO(out_req_pool, rc);
3686
3687         RETURN(rc);
3688
3689 out_req_pool:
3690         ptlrpc_free_rq_pool(osc_rq_pool);
3691 out_shrinker:
3692         unregister_shrinker(&osc_cache_shrinker);
3693 out_type:
3694         class_unregister_type(LUSTRE_OSC_NAME);
3695 out_kmem:
3696         lu_kmem_fini(osc_caches);
3697
3698         RETURN(rc);
3699 }
3700
3701 static void __exit osc_exit(void)
3702 {
3703         osc_stop_grant_work();
3704         unregister_shrinker(&osc_cache_shrinker);
3705         class_unregister_type(LUSTRE_OSC_NAME);
3706         lu_kmem_fini(osc_caches);
3707         ptlrpc_free_rq_pool(osc_rq_pool);
3708 }
3709
3710 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3711 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3712 MODULE_VERSION(LUSTRE_VERSION_STRING);
3713 MODULE_LICENSE("GPL");
3714
3715 module_init(osc_init);
3716 module_exit(osc_exit);