LU-14711 osc: Notify server if cache discard takes a long time
[fs/lustre-release.git] / lustre / osc / osc_request.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}
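
/*
 * Illustrative summary of the synchronous RPC shape used by osc_getattr()
 * and the other handlers in this file (a sketch, not a verbatim recipe):
 *
 *      req = ptlrpc_request_alloc(imp, &RQF_...);       reserve the request
 *      rc = ptlrpc_request_pack(req, version, opcode);  pack capsule fields
 *      ...fill body via lustre_set_wire_obdo()...
 *      ptlrpc_request_set_replen(req);                  size the reply
 *      rc = ptlrpc_queue_wait(req);                     send and wait
 *      body = req_capsule_server_get(...);              unpack the reply
 *      ptlrpc_req_finished(req);                        drop the request
 *
 * The asynchronous variants below replace ptlrpc_queue_wait() with
 * ptlrpcd_add_req() or ptlrpc_set_add_req() plus an rq_interpret_reply
 * callback.
 */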

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* Do MDS-to-OST setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
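
/*
 * Illustrative caller sketch for osc_setattr_async(); my_setattr_done and
 * my_cookie are hypothetical names:
 *
 *      static int my_setattr_done(void *cookie, int rc)
 *      {
 *              // runs from osc_setattr_interpret() with the reply rc
 *              return rc;
 *      }
 *
 *      rc = osc_setattr_async(exp, oa, my_setattr_done, my_cookie, rqset);
 *
 * With rqset == NULL the request is simply handed to ptlrpcd and no upcall
 * is installed; with a set, the caller drives completion through the set.
 */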

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response. The upcall and cookie may
 * also be NULL in that case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);
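
/*
 * Illustrative note: OST_PUNCH carries its byte range inside the obdo
 * itself, conventionally o_size = start offset and o_blocks = end offset
 * (OBD_OBJECT_EOF to punch through end of object). A hypothetical
 * truncate-to-zero would look like:
 *
 *      oa->o_size = 0;
 *      oa->o_blocks = OBD_OBJECT_EOF;
 *      oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
 *      rc = osc_punch_send(exp, oa, my_upcall, my_cookie);
 */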

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked when the RPC reply is interpreted
 * @cookie:     Opaque caller data passed back to @upcall
 * @mode:       Operation done on given range.
 *
 * Only block allocation (standard preallocation) is supported currently;
 * other mode flags are not supported yet. ftruncate(2) and truncate(2)
 * are handled via a SETATTR request instead.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
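
/*
 * Illustrative note: the fallocate mode is the VFS one from
 * <linux/falloc.h>, and the target range travels in the obdo size/blocks
 * fields as for punch. A hypothetical preallocation that keeps the
 * apparent file size unchanged (names are placeholders):
 *
 *      oa->o_size = start;
 *      oa->o_blocks = end;
 *      oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
 *      rc = osc_fallocate_base(exp, oa, my_upcall, my_cookie,
 *                              FALLOC_FL_KEEP_SIZE);
 */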

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}
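
/*
 * Illustrative note: as the comment in osc_sync_base() says, OST_SYNC
 * reuses the obdo size/blocks fields for the byte range, so a hypothetical
 * whole-object sync would be:
 *
 *      oa->o_size = 0;                 // start of range
 *      oa->o_blocks = OBD_OBJECT_EOF;  // end of range
 *      rc = osc_sync_base(obj, oa, my_upcall, my_cookie, rqset);
 */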

/* Find and locally cancel the locks matching @mode in the resource found by
 * @objid. Found locks are added to the @cancels list. Returns the number of
 * locks added to that list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This is distinct from the case where ELC was never supported:
         * there we still want to cancel locks in advance, just cancel them
         * locally without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}
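
/*
 * Note on the pattern above: osc_can_send_destroy() is a lock-free ticket
 * check. The atomic_inc_return() optimistically claims a slot; if that
 * overshoots cl_max_rpcs_in_flight the slot is released again, and the
 * wake_up() covers the window where another thread decremented between the
 * two atomic operations. Callers that get 0 back sleep on cl_destroy_waitq
 * and retry, as osc_destroy() below does.
 */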

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below max_rpcs_in_flight
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and atomic_inc() are not covered by a
                 * lock, so they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}
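
/*
 * Illustrative arithmetic (assuming 4 KiB pages, cl_max_pages_per_rpc =
 * 256 and cl_max_rpcs_in_flight = 8): the undirty request above starts at
 *
 *      nrpages = 256 * (8 + 1) = 2304 pages  =>  undirty = 9 MiB
 *
 * then adds the per-extent grant tax when GRANT_PARAM was negotiated, and
 * is finally clamped so the client never asks for more than roughly
 * OBD_MAX_GRANT minus a 4 * PTLRPC_MAX_BRW_SIZE margin left for server-side
 * overhead.
 */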

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}
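
/*
 * Note on the pattern above: the shrink target is always floored at one
 * full RPC worth of grant (cl_max_pages_per_rpc << PAGE_SHIFT), and the
 * released amount is reported to the server through a KEY_GRANT_SHRINK
 * osc_set_info_async() call rather than a dedicated RPC type; if that call
 * fails, the grant deducted here is credited back via __osc_update_grant().
 */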

static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}
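
/*
 * Note on the pattern above: this is a self-arming delayed work item. Each
 * pass shrinks at most GRANT_SHRINK_RPC_BATCH clients, computes the
 * earliest pending cl_next_shrink_grant among the remaining clients, and
 * re-schedules itself for that time (or immediately if it is already due).
 * osc_stop_grant_work() sets gtd_stopped before cancelling, so a pass that
 * races with shutdown will not re-arm the work.
 */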

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}

/**
 * Start grant work for returning grant to the server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
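
/*
 * Illustrative arithmetic (hypothetical values): with GRANT_PARAM
 * negotiated, ocd_grant_blkbits = 16 (64 KiB server blocks) and 4 KiB
 * client pages, cl_chunkbits becomes 16, so a chunk covers 16 pages and
 * cl_max_pages_per_rpc is rounded up to a multiple of 16; with
 * ocd_grant_max_blks = 1024 the maximum extent becomes
 * (1024 << 16) >> PAGE_SHIFT = 16384 pages.
 */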

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}
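
/*
 * Illustrative example: for three 4096-byte pages and nob_read = 6000,
 * page 0 is kept intact, page 1 keeps bytes 0..1903 and is zeroed from
 * offset 1904 onward, and page 2 is zeroed entirely, so no stale data past
 * the server-reported EOF is ever exposed to the reader.
 */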

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}
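
/*
 * Note on the check above: two brw_pages merge into one remote niobuf only
 * if they are byte-contiguous (p1->off + p1->count == p2->off) and their
 * flags are identical. Differences confined to the masked bits (grant,
 * cache, sync, quota hints) are expected and stay silent; any other flag
 * difference is additionally reported via CWARN. osc_brw_prep_request()
 * uses this to count niobufs before packing the request.
 */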

#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The remaining guard slots must be able to hold the
                 * checksums of a whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data, so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum)  \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data, so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}
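
/*
 * Note on the fault injection above: both checksum paths share the same
 * convention. For reads (OBD_FAIL_OSC_CHECKSUM_RECEIVE) the data itself is
 * corrupted before hashing, so the mismatch is caught on this client; for
 * writes (OBD_FAIL_OSC_CHECKSUM_SEND) only the computed checksum is bumped,
 * so the data stays correct and a server-requested resend can succeed.
 */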

static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}
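
/*
 * Note on the helper above: for encrypted files, osc_brw_prep_request()
 * below widens each brw_page to full LUSTRE_ENCRYPTION_UNIT_SIZE units and
 * records the deltas in bp_count_diff/bp_off_diff. This helper is the
 * inverse: it drops the llcrypt bounce page (identified by PageChecked)
 * and restores the clear-text count/off so upper layers see the original
 * I/O geometry again.
 */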
1374
1375 static int
1376 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1377                      u32 page_count, struct brw_page **pga,
1378                      struct ptlrpc_request **reqp, int resend)
1379 {
1380         struct ptlrpc_request *req;
1381         struct ptlrpc_bulk_desc *desc;
1382         struct ost_body *body;
1383         struct obd_ioobj *ioobj;
1384         struct niobuf_remote *niobuf;
1385         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1386         struct osc_brw_async_args *aa;
1387         struct req_capsule *pill;
1388         struct brw_page *pg_prev;
1389         void *short_io_buf;
1390         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1391         struct inode *inode = NULL;
1392         bool directio = false;
1393
1394         ENTRY;
1395         if (pga[0]->pg) {
1396                 inode = page2inode(pga[0]->pg);
1397                 if (inode == NULL) {
1398                         /* Try to get reference to inode from cl_page if we are
1399                          * dealing with direct IO, as handled pages are not
1400                          * actual page cache pages.
1401                          */
1402                         struct osc_async_page *oap = brw_page2oap(pga[0]);
1403                         struct cl_page *clpage = oap2cl_page(oap);
1404
1405                         inode = clpage->cp_inode;
1406                         if (inode)
1407                                 directio = true;
1408                 }
1409         }
1410         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1411                 RETURN(-ENOMEM); /* Recoverable */
1412         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1413                 RETURN(-EINVAL); /* Fatal */
1414
1415         if ((cmd & OBD_BRW_WRITE) != 0) {
1416                 opc = OST_WRITE;
1417                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1418                                                 osc_rq_pool,
1419                                                 &RQF_OST_BRW_WRITE);
1420         } else {
1421                 opc = OST_READ;
1422                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1423         }
1424         if (req == NULL)
1425                 RETURN(-ENOMEM);
1426
1427         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1428                 for (i = 0; i < page_count; i++) {
1429                         struct brw_page *pg = pga[i];
1430                         struct page *data_page = NULL;
1431                         bool retried = false;
1432                         bool lockedbymyself;
1433                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1434                         struct address_space *map_orig = NULL;
1435                         pgoff_t index_orig;
1436
1437 retry_encrypt:
1438                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1439                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1440                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1441                         /* The page can already be locked when we arrive here.
1442                          * This is possible when cl_page_assume/vvp_page_assume
1443                          * is stuck on wait_on_page_writeback with page lock
1444                          * held. In this case there is no risk for the lock to
1445                          * be released while we are doing our encryption
1446                          * processing, because writeback against that page will
1447                          * end in vvp_page_completion_write/cl_page_completion,
1448                          * which means only once the page is fully processed.
1449                          */
1450                         lockedbymyself = trylock_page(pg->pg);
1451                         if (directio) {
1452                                 map_orig = pg->pg->mapping;
1453                                 pg->pg->mapping = inode->i_mapping;
1454                                 index_orig = pg->pg->index;
1455                                 pg->pg->index = pg->off >> PAGE_SHIFT;
1456                         }
1457                         data_page =
1458                                 llcrypt_encrypt_pagecache_blocks(pg->pg,
1459                                                                  nunits, 0,
1460                                                                  GFP_NOFS);
1461                         if (directio) {
1462                                 pg->pg->mapping = map_orig;
1463                                 pg->pg->index = index_orig;
1464                         }
1465                         if (lockedbymyself)
1466                                 unlock_page(pg->pg);
1467                         if (IS_ERR(data_page)) {
1468                                 rc = PTR_ERR(data_page);
1469                                 if (rc == -ENOMEM && !retried) {
1470                                         retried = true;
1471                                         rc = 0;
1472                                         goto retry_encrypt;
1473                                 }
1474                                 ptlrpc_request_free(req);
1475                                 RETURN(rc);
1476                         }
1477                         /* Set PageChecked flag on bounce page for
1478                          * disambiguation in osc_release_bounce_pages().
1479                          */
1480                         SetPageChecked(data_page);
1481                         pg->pg = data_page;
1482                         /* there should be no gap in the middle of the page array */
1483                         if (i == page_count - 1) {
1484                                 struct osc_async_page *oap = brw_page2oap(pg);
1485
1486                                 oa->o_size = oap->oap_count +
1487                                         oap->oap_obj_off + oap->oap_page_off;
1488                         }
1489                         /* len is forced to nunits, and the relative offset
1490                          * to 0, so store the old clear-text info
1491                          */
1492                         pg->bp_count_diff = nunits - pg->count;
1493                         pg->count = nunits;
1494                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1495                         pg->off = pg->off & PAGE_MASK;
1496                 }
1497         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
1498                 for (i = 0; i < page_count; i++) {
1499                         struct brw_page *pg = pga[i];
1500                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1501
1502                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1503                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1504                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1505                         /* count/off are forced to cover the whole encryption
1506                          * unit size so that all encrypted data is stored on
1507                          * the OST; adjust bp_{count,off}_diff to record the
1508                          * size of the clear text.
1509                          */
1510                         pg->bp_count_diff = nunits - pg->count;
1511                         pg->count = nunits;
1512                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1513                         pg->off = pg->off & PAGE_MASK;
1514                 }
1515         }
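        /* A worked example of the rounding above (illustrative, not from the
         * original source), assuming PAGE_SIZE == 4096 and
         * LUSTRE_ENCRYPTION_UNIT_SIZE == 4096: a clear-text fragment with
         * off = 4196 and count = 300 gives nunits = 100 + 300 = 400, rounded
         * up to 4096; bp_count_diff = 4096 - 300 = 3796 and
         * bp_off_diff = 100 are recorded, and the brw_page is widened to
         * off = 4096, count = 4096, so the clear-text extent can be
         * recovered once the I/O completes.
         */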
1516
1517         for (niocount = i = 1; i < page_count; i++) {
1518                 if (!can_merge_pages(pga[i - 1], pga[i]))
1519                         niocount++;
1520         }
1521
1522         pill = &req->rq_pill;
1523         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1524                              sizeof(*ioobj));
1525         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1526                              niocount * sizeof(*niobuf));
1527
1528         for (i = 0; i < page_count; i++) {
1529                 short_io_size += pga[i]->count;
1530                 if (!inode || !IS_ENCRYPTED(inode)) {
1531                         pga[i]->bp_count_diff = 0;
1532                         pga[i]->bp_off_diff = 0;
1533                 }
1534         }
1535
1536         /* Check if read/write is small enough to be a short io. */
1537         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1538             !imp_connect_shortio(cli->cl_import))
1539                 short_io_size = 0;
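        /* Concretely (an illustrative note, not from the original source): a
         * single contiguous fragment no larger than cl_max_short_io_bytes on
         * a connection advertising OBD_CONNECT_SHORTIO is inlined in the
         * request/reply buffers and no bulk descriptor is set up; fragmented
         * I/O (niocount > 1) or an older server always takes the bulk path.
         */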
1540
1541         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1542                              opc == OST_READ ? 0 : short_io_size);
1543         if (opc == OST_READ)
1544                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1545                                      short_io_size);
1546
1547         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1548         if (rc) {
1549                 ptlrpc_request_free(req);
1550                 RETURN(rc);
1551         }
1552         osc_set_io_portal(req);
1553
1554         ptlrpc_at_set_req_timeout(req);
1555         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1556          * retry logic */
1557         req->rq_no_retry_einprogress = 1;
1558
1559         if (short_io_size != 0) {
1560                 desc = NULL;
1561                 short_io_buf = NULL;
1562                 goto no_bulk;
1563         }
1564
1565         desc = ptlrpc_prep_bulk_imp(req, page_count,
1566                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1567                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1568                         PTLRPC_BULK_PUT_SINK),
1569                 OST_BULK_PORTAL,
1570                 &ptlrpc_bulk_kiov_pin_ops);
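        /* The second argument above caps the number of bulk MDs:
         * ocd_brw_size is the negotiated maximum BRW size and each LNet MD
         * carries at most 1 << LNET_MTU_BITS bytes, so e.g. a 4 MB BRW size
         * with a 1 MB LNet MTU allows up to 4 MDs (an interpretation of the
         * shift above, not from the original comments).
         */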
1571
1572         if (desc == NULL)
1573                 GOTO(out, rc = -ENOMEM);
1574         /* NB request now owns desc and will free it when it gets freed */
1575 no_bulk:
1576         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1577         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1578         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1579         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1580
1581         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1582
1583         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1584          * and from_kgid(), because these RPCs are handled asynchronously.
1585          * Fortunately, variable oa already contains valid o_uid and o_gid for
1586          * these two operations, and filling them is enough for nrs-tbf, see
1587          * LU-9658. OBD_MD_FLUID and OBD_MD_FLGID are not set in order to
1588          * avoid breaking other process logic */
1589         body->oa.o_uid = oa->o_uid;
1590         body->oa.o_gid = oa->o_gid;
1591
1592         obdo_to_ioobj(oa, ioobj);
1593         ioobj->ioo_bufcnt = niocount;
1594         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1595          * of bulks that might be sent for this request. The actual number is
1596          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1597          * sends "max - 1" for compatibility with old clients sending "0", and
1598          * also so the actual maximum is a power-of-two, not one less. LU-1431 */
1599         if (desc != NULL)
1600                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1601         else /* short io */
1602                 ioobj_max_brw_set(ioobj, 0);
1603
1604         if (short_io_size != 0) {
1605                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1606                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1607                         body->oa.o_flags = 0;
1608                 }
1609                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1610                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1611                        short_io_size);
1612                 if (opc == OST_WRITE) {
1613                         short_io_buf = req_capsule_client_get(pill,
1614                                                               &RMF_SHORT_IO);
1615                         LASSERT(short_io_buf != NULL);
1616                 }
1617         }
1618
1619         LASSERT(page_count > 0);
1620         pg_prev = pga[0];
1621         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1622                 struct brw_page *pg = pga[i];
1623                 int poff = pg->off & ~PAGE_MASK;
1624
1625                 LASSERT(pg->count > 0);
1626                 /* make sure there is no gap in the middle of page array */
1627                 LASSERTF(page_count == 1 ||
1628                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1629                           ergo(i > 0 && i < page_count - 1,
1630                                poff == 0 && pg->count == PAGE_SIZE)   &&
1631                           ergo(i == page_count - 1, poff == 0)),
1632                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1633                          i, page_count, pg, pg->off, pg->count);
1634                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1635                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1636                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1637                          i, page_count,
1638                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1639                          pg_prev->pg, page_private(pg_prev->pg),
1640                          pg_prev->pg->index, pg_prev->off);
1641                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1642                         (pg->flag & OBD_BRW_SRVLOCK));
1643                 if (short_io_size != 0 && opc == OST_WRITE) {
1644                         unsigned char *ptr = kmap_atomic(pg->pg);
1645
1646                         LASSERT(short_io_size >= requested_nob + pg->count);
1647                         memcpy(short_io_buf + requested_nob,
1648                                ptr + poff,
1649                                pg->count);
1650                         kunmap_atomic(ptr);
1651                 } else if (short_io_size == 0) {
1652                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1653                                                          pg->count);
1654                 }
1655                 requested_nob += pg->count;
1656
1657                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1658                         niobuf--;
1659                         niobuf->rnb_len += pg->count;
1660                 } else {
1661                         niobuf->rnb_offset = pg->off;
1662                         niobuf->rnb_len    = pg->count;
1663                         niobuf->rnb_flags  = pg->flag;
1664                 }
1665                 pg_prev = pg;
1666         }
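        /* At this point file-contiguous pages have been merged (illustrative
         * example, not from the original source): three contiguous 4 KB pages
         * collapse into one niobuf with rnb_len = 12288, while any gap starts
         * a new niobuf, so the entries match the niocount computed earlier
         * with can_merge_pages().
         */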
1667
1668         LASSERTF((void *)(niobuf - niocount) ==
1669                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1670                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1671                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1672
1673         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1674         if (resend) {
1675                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1676                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1677                         body->oa.o_flags = 0;
1678                 }
1679                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1680         }
1681
1682         if (osc_should_shrink_grant(cli))
1683                 osc_shrink_grant_local(cli, &body->oa);
1684
1685         /* size[REQ_REC_OFF] still sizeof (*body) */
1686         if (opc == OST_WRITE) {
1687                 if (cli->cl_checksum &&
1688                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1689                         /* store cl_cksum_type in a local variable since
1690                          * it can be changed via lprocfs */
1691                         enum cksum_types cksum_type = cli->cl_cksum_type;
1692
1693                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1694                                 body->oa.o_flags = 0;
1695
1696                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1697                                                                 cksum_type);
1698                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1699
1700                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1701                                                   requested_nob, page_count,
1702                                                   pga, OST_WRITE,
1703                                                   &body->oa.o_cksum);
1704                         if (rc < 0) {
1705                                 CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
1706                                        rc);
1707                                 GOTO(out, rc);
1708                         }
1709                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1710                                body->oa.o_cksum);
1711
1712                         /* save this in 'oa', too, for later checking */
1713                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1714                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1715                                                            cksum_type);
1716                 } else {
1717                         /* clear out the checksum flag, in case this is a
1718                          * resend but cl_checksum is no longer set. b=11238 */
1719                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1720                 }
1721                 oa->o_cksum = body->oa.o_cksum;
1722                 /* 1 RC per niobuf */
1723                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1724                                      sizeof(__u32) * niocount);
1725         } else {
1726                 if (cli->cl_checksum &&
1727                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1728                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1729                                 body->oa.o_flags = 0;
1730                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1731                                 cli->cl_cksum_type);
1732                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1733                 }
1734
1735                 /* The client cksum has already been copied to the wire obdo
1736                  * in the previous lustre_set_wire_obdo(); in case a bulk read
1737                  * is being resent due to a cksum error, this allows the
1738                  * server to check+dump pages on its side */
1739         }
1740         ptlrpc_request_set_replen(req);
1741
1742         aa = ptlrpc_req_async_args(aa, req);
1743         aa->aa_oa = oa;
1744         aa->aa_requested_nob = requested_nob;
1745         aa->aa_nio_count = niocount;
1746         aa->aa_page_count = page_count;
1747         aa->aa_resends = 0;
1748         aa->aa_ppga = pga;
1749         aa->aa_cli = cli;
1750         INIT_LIST_HEAD(&aa->aa_oaps);
1751
1752         *reqp = req;
1753         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1754         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1755                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1756                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1757         RETURN(0);
1758
1759  out:
1760         ptlrpc_req_finished(req);
1761         RETURN(rc);
1762 }
1763
1764 char dbgcksum_file_name[PATH_MAX];
1765
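/* Dump every page of a failing bulk transfer to a file under
 * libcfs_debug_file_path so checksum mismatches can be inspected offline.
 * The file name encodes the parent FID, the byte range and both checksums;
 * opening with O_EXCL fails with -EEXIST on resends, so only the first
 * failure for a given range is kept.
 */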
1766 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1767                                 struct brw_page **pga, __u32 server_cksum,
1768                                 __u32 client_cksum)
1769 {
1770         struct file *filp;
1771         int rc, i;
1772         unsigned int len;
1773         char *buf;
1774
1775         /* only keep a dump of the pages on the first error for a given range
1776          * in the file/fid, not during resends/retries. */
1777         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1778                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1779                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1780                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1781                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1782                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1783                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1784                  pga[0]->off,
1785                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1786                  client_cksum, server_cksum);
1787         filp = filp_open(dbgcksum_file_name,
1788                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1789         if (IS_ERR(filp)) {
1790                 rc = PTR_ERR(filp);
1791                 if (rc == -EEXIST)
1792                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1793                                "checksum error: rc = %d\n", dbgcksum_file_name,
1794                                rc);
1795                 else
1796                         CERROR("%s: can't open to dump pages with checksum "
1797                                "error: rc = %d\n", dbgcksum_file_name, rc);
1798                 return;
1799         }
1800
1801         for (i = 0; i < page_count; i++) {
1802                 len = pga[i]->count;
1803                 buf = kmap(pga[i]->pg);
1804                 while (len != 0) {
1805                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1806                         if (rc < 0) {
1807                                 CERROR("%s: wanted to write %u but got %d "
1808                                        "error\n", dbgcksum_file_name, len, rc);
1809                                 break;
1810                         }
1811                         len -= rc;
1812                         buf += rc;
1813                         CDEBUG(D_INFO, "%s: wrote %d bytes\n",
1814                                dbgcksum_file_name, rc);
1815                 }
1816                 kunmap(pga[i]->pg);
1817         }
1818
1819         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1820         if (rc)
1821                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1822         filp_close(filp, NULL);
1823 }
1824
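/* Compare client-side and server-side checksums of a bulk write.  Returns 0
 * if they match.  On mismatch the bulk is re-checksummed locally with the
 * type the server actually used, which distinguishes pages changed after the
 * client checksummed them (e.g. mmap I/O), corruption in transit, and
 * checksum-type protocol problems.  Returns 1 so the caller can resend.
 */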
1825 static int
1826 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1827                      __u32 client_cksum, __u32 server_cksum,
1828                      struct osc_brw_async_args *aa)
1829 {
1830         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1831         enum cksum_types cksum_type;
1832         obd_dif_csum_fn *fn = NULL;
1833         int sector_size = 0;
1834         __u32 new_cksum;
1835         char *msg;
1836         int rc;
1837
1838         if (server_cksum == client_cksum) {
1839                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1840                 return 0;
1841         }
1842
1843         if (aa->aa_cli->cl_checksum_dump)
1844                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1845                                     server_cksum, client_cksum);
1846
1847         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1848                                            oa->o_flags : 0);
1849
1850         switch (cksum_type) {
1851         case OBD_CKSUM_T10IP512:
1852                 fn = obd_dif_ip_fn;
1853                 sector_size = 512;
1854                 break;
1855         case OBD_CKSUM_T10IP4K:
1856                 fn = obd_dif_ip_fn;
1857                 sector_size = 4096;
1858                 break;
1859         case OBD_CKSUM_T10CRC512:
1860                 fn = obd_dif_crc_fn;
1861                 sector_size = 512;
1862                 break;
1863         case OBD_CKSUM_T10CRC4K:
1864                 fn = obd_dif_crc_fn;
1865                 sector_size = 4096;
1866                 break;
1867         default:
1868                 break;
1869         }
1870
1871         if (fn)
1872                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1873                                              aa->aa_page_count, aa->aa_ppga,
1874                                              OST_WRITE, fn, sector_size,
1875                                              &new_cksum);
1876         else
1877                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1878                                        aa->aa_ppga, OST_WRITE, cksum_type,
1879                                        &new_cksum);
1880
1881         if (rc < 0)
1882                 msg = "failed to calculate the client write checksum";
1883         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1884                 msg = "the server did not use the checksum type specified in "
1885                       "the original request - likely a protocol problem";
1886         else if (new_cksum == server_cksum)
1887                 msg = "changed on the client after we checksummed it - "
1888                       "likely false positive due to mmap IO (bug 11742)";
1889         else if (new_cksum == client_cksum)
1890                 msg = "changed in transit before arrival at OST";
1891         else
1892                 msg = "changed in transit AND doesn't match the original - "
1893                       "likely false positive due to mmap IO (bug 11742)";
1894
1895         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1896                            DFID " object "DOSTID" extent [%llu-%llu], original "
1897                            "client csum %x (type %x), server csum %x (type %x),"
1898                            " client csum now %x\n",
1899                            obd_name, msg, libcfs_nid2str(peer->nid),
1900                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1901                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1902                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1903                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1904                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1905                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1906                            client_cksum,
1907                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1908                            server_cksum, cksum_type, new_cksum);
1909         return 1;
1910 }
1911
1912 /* Note: rc enters this function as the number of bytes transferred */
1913 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1914 {
1915         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1916         struct client_obd *cli = aa->aa_cli;
1917         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1918         const struct lnet_process_id *peer =
1919                 &req->rq_import->imp_connection->c_peer;
1920         struct ost_body *body;
1921         u32 client_cksum = 0;
1922         struct inode *inode;
1923         unsigned int blockbits = 0, blocksize = 0;
1924
1925         ENTRY;
1926
1927         if (rc < 0 && rc != -EDQUOT) {
1928                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1929                 RETURN(rc);
1930         }
1931
1932         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1933         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1934         if (body == NULL) {
1935                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1936                 RETURN(-EPROTO);
1937         }
1938
1939         /* set/clear over quota flag for a uid/gid/projid */
1940         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1941             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1942                 unsigned qid[LL_MAXQUOTAS] = {
1943                                          body->oa.o_uid, body->oa.o_gid,
1944                                          body->oa.o_projid };
1945                 CDEBUG(D_QUOTA,
1946                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1947                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1948                        body->oa.o_valid, body->oa.o_flags);
1949                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1950                                 body->oa.o_flags);
1951         }
1952
1953         osc_update_grant(cli, body);
1954
1955         if (rc < 0)
1956                 RETURN(rc);
1957
1958         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1959                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1960
1961         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1962                 if (rc > 0) {
1963                         CERROR("%s: unexpected positive size %d\n",
1964                                obd_name, rc);
1965                         RETURN(-EPROTO);
1966                 }
1967
1968                 if (req->rq_bulk != NULL &&
1969                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1970                         RETURN(-EAGAIN);
1971
1972                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1973                     check_write_checksum(&body->oa, peer, client_cksum,
1974                                          body->oa.o_cksum, aa))
1975                         RETURN(-EAGAIN);
1976
1977                 rc = check_write_rcs(req, aa->aa_requested_nob,
1978                                      aa->aa_nio_count, aa->aa_page_count,
1979                                      aa->aa_ppga);
1980                 GOTO(out, rc);
1981         }
1982
1983         /* The rest of this function executes only for OST_READs */
1984
1985         if (req->rq_bulk == NULL) {
1986                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
1987                                           RCL_SERVER);
1988                 LASSERT(rc == req->rq_status);
1989         } else {
1990                 /* if unwrap_bulk failed, return -EAGAIN to retry */
1991                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1992         }
1993         if (rc < 0)
1994                 GOTO(out, rc = -EAGAIN);
1995
1996         if (rc > aa->aa_requested_nob) {
1997                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
1998                        rc, aa->aa_requested_nob);
1999                 RETURN(-EPROTO);
2000         }
2001
2002         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2003                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2004                        rc, req->rq_bulk->bd_nob_transferred);
2005                 RETURN(-EPROTO);
2006         }
2007
2008         if (req->rq_bulk == NULL) {
2009                 /* short io */
2010                 int nob, pg_count, i = 0;
2011                 unsigned char *buf;
2012
2013                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2014                 pg_count = aa->aa_page_count;
2015                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2016                                                    rc);
2017                 nob = rc;
2018                 while (nob > 0 && pg_count > 0) {
2019                         unsigned char *ptr;
2020                         int count = aa->aa_ppga[i]->count > nob ?
2021                                     nob : aa->aa_ppga[i]->count;
2022
2023                         CDEBUG(D_CACHE, "page %p count %d\n",
2024                                aa->aa_ppga[i]->pg, count);
2025                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2026                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2027                                count);
2028                         kunmap_atomic((void *) ptr);
2029
2030                         buf += count;
2031                         nob -= count;
2032                         i++;
2033                         pg_count--;
2034                 }
2035         }
2036
2037         if (rc < aa->aa_requested_nob)
2038                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2039
2040         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2041                 static int cksum_counter;
2042                 u32        server_cksum = body->oa.o_cksum;
2043                 char      *via = "";
2044                 char      *router = "";
2045                 enum cksum_types cksum_type;
2046                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2047                         body->oa.o_flags : 0;
2048
2049                 cksum_type = obd_cksum_type_unpack(o_flags);
2050                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
2051                                           aa->aa_page_count, aa->aa_ppga,
2052                                           OST_READ, &client_cksum);
2053                 if (rc < 0)
2054                         GOTO(out, rc);
2055
2056                 if (req->rq_bulk != NULL &&
2057                     peer->nid != req->rq_bulk->bd_sender) {
2058                         via = " via ";
2059                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2060                 }
2061
2062                 if (server_cksum != client_cksum) {
2063                         struct ost_body *clbody;
2064                         u32 page_count = aa->aa_page_count;
2065
2066                         clbody = req_capsule_client_get(&req->rq_pill,
2067                                                         &RMF_OST_BODY);
2068                         if (cli->cl_checksum_dump)
2069                                 dump_all_bulk_pages(&clbody->oa, page_count,
2070                                                     aa->aa_ppga, server_cksum,
2071                                                     client_cksum);
2072
2073                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2074                                            "%s%s%s inode "DFID" object "DOSTID
2075                                            " extent [%llu-%llu], client %x, "
2076                                            "server %x, cksum_type %x\n",
2077                                            obd_name,
2078                                            libcfs_nid2str(peer->nid),
2079                                            via, router,
2080                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2081                                                 clbody->oa.o_parent_seq : 0ULL,
2082                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2083                                                 clbody->oa.o_parent_oid : 0,
2084                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2085                                                 clbody->oa.o_parent_ver : 0,
2086                                            POSTID(&body->oa.o_oi),
2087                                            aa->aa_ppga[0]->off,
2088                                            aa->aa_ppga[page_count-1]->off +
2089                                            aa->aa_ppga[page_count-1]->count - 1,
2090                                            client_cksum, server_cksum,
2091                                            cksum_type);
2092                         cksum_counter = 0;
2093                         aa->aa_oa->o_cksum = client_cksum;
2094                         rc = -EAGAIN;
2095                 } else {
2096                         cksum_counter++;
2097                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2098                         rc = 0;
2099                 }
2100         } else if (unlikely(client_cksum)) {
2101                 static int cksum_missed;
2102
2103                 cksum_missed++;
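                /* (x & -x) == x holds only when x is a power of two, so the
                 * error below fires on the 1st, 2nd, 4th, 8th, ... miss, a
                 * simple exponential rate limit.
                 */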
2104                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2105                         CERROR("%s: checksum %u requested from %s but not sent\n",
2106                                obd_name, cksum_missed,
2107                                libcfs_nid2str(peer->nid));
2108         } else {
2109                 rc = 0;
2110         }
2111
2112         inode = page2inode(aa->aa_ppga[0]->pg);
2113         if (inode == NULL) {
2114                 /* Try to get reference to inode from cl_page if we are
2115                  * dealing with direct IO, as handled pages are not
2116                  * actual page cache pages.
2117                  */
2118                 struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
2119
2120                 inode = oap2cl_page(oap)->cp_inode;
2121                 if (inode) {
2122                         blockbits = inode->i_blkbits;
2123                         blocksize = 1 << blockbits;
2124                 }
2125         }
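        /* For encrypted files the data just read is still ciphertext, so
         * decrypt it in place one LUSTRE_ENCRYPTION_UNIT_SIZE chunk at a
         * time.  An all-zero unit is a hole and is skipped (clearing
         * PagePrivate2 at offset 0 tells the upper layers to treat the page
         * as zeroes).  For direct I/O (blockbits != 0) the per-block variant
         * is used since the pages are not page cache pages.
         */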
2126         if (inode && IS_ENCRYPTED(inode)) {
2127                 int idx;
2128
2129                 if (!llcrypt_has_encryption_key(inode)) {
2130                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2131                         GOTO(out, rc);
2132                 }
2133                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2134                         struct brw_page *pg = aa->aa_ppga[idx];
2135                         unsigned int offs = 0;
2136
2137                         while (offs < PAGE_SIZE) {
2138                                 /* do not decrypt if page is all 0s */
2139                                 if (memchr_inv(page_address(pg->pg) + offs, 0,
2140                                          LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2141                                         /* if the page is empty, forward this
2142                                          * info to the upper layers
2143                                          * (ll_io_zero_page) by clearing
2144                                          * PagePrivate2 */
2145                                         if (!offs)
2146                                                 ClearPagePrivate2(pg->pg);
2147                                         break;
2148                                 }
2149
2150                                 if (blockbits) {
2151                                         /* This is the direct IO case. Directly
2152                                          * call the decrypt function that takes
2153                                          * the inode as an input parameter. The
2154                                          * page does not need to be locked.
2155                                          */
2156                                         u64 lblk_num =
2157                                                 ((u64)(pg->off >> PAGE_SHIFT) <<
2158                                                      (PAGE_SHIFT - blockbits)) +
2159                                                        (offs >> blockbits);
2160                                         unsigned int i;
2161
2162                                         for (i = offs;
2163                                              i < offs +
2164                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2165                                              i += blocksize, lblk_num++) {
2166                                                 rc =
2167                                                   llcrypt_decrypt_block_inplace(
2168                                                           inode, pg->pg,
2169                                                           blocksize, i,
2170                                                           lblk_num);
2171                                                 if (rc)
2172                                                         break;
2173                                         }
2174                                 } else {
2175                                         rc = llcrypt_decrypt_pagecache_blocks(
2176                                                 pg->pg,
2177                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2178                                                 offs);
2179                                 }
2180                                 if (rc)
2181                                         GOTO(out, rc);
2182
2183                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2184                         }
2185                 }
2186         }
2187
2188 out:
2189         if (rc >= 0)
2190                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2191                                      aa->aa_oa, &body->oa);
2192
2193         RETURN(rc);
2194 }
2195
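/* Rebuild and resend a BRW request after a recoverable error (e.g. the
 * server returned -EINPROGRESS).  The new request takes over the page array
 * and async pages from the old one; its dispatch is delayed by one second
 * per resend so far, capped at the request timeout.
 */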
2196 static int osc_brw_redo_request(struct ptlrpc_request *request,
2197                                 struct osc_brw_async_args *aa, int rc)
2198 {
2199         struct ptlrpc_request *new_req;
2200         struct osc_brw_async_args *new_aa;
2201         struct osc_async_page *oap;
2202         ENTRY;
2203
2204         /* The below message is checked in replay-ost-single.sh test_8ae */
2205         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2206                   "redo for recoverable error %d", rc);
2207
2208         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2209                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2210                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2211                                   aa->aa_ppga, &new_req, 1);
2212         if (rc)
2213                 RETURN(rc);
2214
2215         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2216                 if (oap->oap_request != NULL) {
2217                         LASSERTF(request == oap->oap_request,
2218                                  "request %p != oap_request %p\n",
2219                                  request, oap->oap_request);
2220                 }
2221         }
2222         /*
2223          * New request takes over pga and oaps from old request.
2224          * Note that copying a list_head doesn't work, need to move it...
2225          */
2226         aa->aa_resends++;
2227         new_req->rq_interpret_reply = request->rq_interpret_reply;
2228         new_req->rq_async_args = request->rq_async_args;
2229         new_req->rq_commit_cb = request->rq_commit_cb;
2230         /* cap the resend delay to the current request timeout; this is
2231          * similar to what ptlrpc does (see after_reply()) */
2232         if (aa->aa_resends > new_req->rq_timeout)
2233                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2234         else
2235                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
2236         new_req->rq_generation_set = 1;
2237         new_req->rq_import_generation = request->rq_import_generation;
2238
2239         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2240
2241         INIT_LIST_HEAD(&new_aa->aa_oaps);
2242         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2243         INIT_LIST_HEAD(&new_aa->aa_exts);
2244         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2245         new_aa->aa_resends = aa->aa_resends;
2246
2247         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2248                 if (oap->oap_request) {
2249                         ptlrpc_req_finished(oap->oap_request);
2250                         oap->oap_request = ptlrpc_request_addref(new_req);
2251                 }
2252         }
2253
2254         /* XXX: This code will run into problems if we ever support adding
2255          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2256          * waiting for all of them to finish. We should inherit the request
2257          * set from the old request. */
2258         ptlrpcd_add_req(new_req);
2259
2260         DEBUG_REQ(D_INFO, new_req, "new request");
2261         RETURN(0);
2262 }
2263
2264 /*
2265  * ugh, we want disk allocation on the target to happen in offset order.  we'll
2266  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2267  * fine for our small page arrays and doesn't require allocation.  it's an
2268  * insertion sort that swaps elements that are strides apart, shrinking the
2269  * stride down until it's 1 and the array is sorted.
2270  */
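/* For reference (an added note, not in the original comment): the stride
 * sequence is Knuth's 3h+1 series 1, 4, 13, 40, 121, ...; the loop below
 * finds the first term >= num, and dividing by 3 each pass walks back down
 * the same series until the final stride-1 insertion sort.
 */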
2271 static void sort_brw_pages(struct brw_page **array, int num)
2272 {
2273         int stride, i, j;
2274         struct brw_page *tmp;
2275
2276         if (num == 1)
2277                 return;
2278         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2279                 ;
2280
2281         do {
2282                 stride /= 3;
2283                 for (i = stride ; i < num ; i++) {
2284                         tmp = array[i];
2285                         j = i;
2286                         while (j >= stride && array[j - stride]->off > tmp->off) {
2287                                 array[j] = array[j - stride];
2288                                 j -= stride;
2289                         }
2290                         array[j] = tmp;
2291                 }
2292         } while (stride > 1);
2293 }
2294
2295 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2296 {
2297         LASSERT(ppga != NULL);
2298         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2299 }
2300
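/* Completion callback for BRW RPCs, run from ptlrpcd context.  It finalizes
 * the reply, retries recoverable errors via osc_brw_redo_request(), updates
 * the cl_object attributes (blocks, [acm]time, and size/KMS for extending
 * writes), finishes the extents, and decrements the in-flight RPC counters
 * before kicking osc_io_unplug().
 */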
2301 static int brw_interpret(const struct lu_env *env,
2302                          struct ptlrpc_request *req, void *args, int rc)
2303 {
2304         struct osc_brw_async_args *aa = args;
2305         struct osc_extent *ext;
2306         struct osc_extent *tmp;
2307         struct client_obd *cli = aa->aa_cli;
2308         unsigned long transferred = 0;
2309
2310         ENTRY;
2311
2312         rc = osc_brw_fini_request(req, rc);
2313         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2314
2315         /* restore clear text pages */
2316         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2317
2318         /*
2319          * When server returns -EINPROGRESS, client should always retry
2320          * regardless of the number of times the bulk was resent already.
2321          */
2322         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2323                 if (req->rq_import_generation !=
2324                     req->rq_import->imp_generation) {
2325                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2326                                ""DOSTID", rc = %d.\n",
2327                                req->rq_import->imp_obd->obd_name,
2328                                POSTID(&aa->aa_oa->o_oi), rc);
2329                 } else if (rc == -EINPROGRESS ||
2330                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2331                         rc = osc_brw_redo_request(req, aa, rc);
2332                 } else {
2333                         CERROR("%s: too many resent retries for object: "
2334                                "%llu:%llu, rc = %d.\n",
2335                                req->rq_import->imp_obd->obd_name,
2336                                POSTID(&aa->aa_oa->o_oi), rc);
2337                 }
2338
2339                 if (rc == 0)
2340                         RETURN(0);
2341                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2342                         rc = -EIO;
2343         }
2344
2345         if (rc == 0) {
2346                 struct obdo *oa = aa->aa_oa;
2347                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2348                 unsigned long valid = 0;
2349                 struct cl_object *obj;
2350                 struct osc_async_page *last;
2351
2352                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2353                 obj = osc2cl(last->oap_obj);
2354
2355                 cl_object_attr_lock(obj);
2356                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2357                         attr->cat_blocks = oa->o_blocks;
2358                         valid |= CAT_BLOCKS;
2359                 }
2360                 if (oa->o_valid & OBD_MD_FLMTIME) {
2361                         attr->cat_mtime = oa->o_mtime;
2362                         valid |= CAT_MTIME;
2363                 }
2364                 if (oa->o_valid & OBD_MD_FLATIME) {
2365                         attr->cat_atime = oa->o_atime;
2366                         valid |= CAT_ATIME;
2367                 }
2368                 if (oa->o_valid & OBD_MD_FLCTIME) {
2369                         attr->cat_ctime = oa->o_ctime;
2370                         valid |= CAT_CTIME;
2371                 }
2372
2373                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2374                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2375                         loff_t last_off = last->oap_count + last->oap_obj_off +
2376                                 last->oap_page_off;
2377
2378                         /* Change file size if this is an out of quota or
2379                          * direct IO write and it extends the file size */
2380                         if (loi->loi_lvb.lvb_size < last_off) {
2381                                 attr->cat_size = last_off;
2382                                 valid |= CAT_SIZE;
2383                         }
2384                         /* Extend KMS if it's not a lockless write */
2385                         if (loi->loi_kms < last_off &&
2386                             oap2osc_page(last)->ops_srvlock == 0) {
2387                                 attr->cat_kms = last_off;
2388                                 valid |= CAT_KMS;
2389                         }
2390                 }
2391
2392                 if (valid != 0)
2393                         cl_object_attr_update(env, obj, attr, valid);
2394                 cl_object_attr_unlock(obj);
2395         }
2396         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2397         aa->aa_oa = NULL;
2398
2399         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2400                 osc_inc_unstable_pages(req);
2401
2402         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2403                 list_del_init(&ext->oe_link);
2404                 osc_extent_finish(env, ext, 1,
2405                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2406         }
2407         LASSERT(list_empty(&aa->aa_exts));
2408         LASSERT(list_empty(&aa->aa_oaps));
2409
2410         transferred = (req->rq_bulk == NULL ? /* short io */
2411                        aa->aa_requested_nob :
2412                        req->rq_bulk->bd_nob_transferred);
2413
2414         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2415         ptlrpc_lprocfs_brw(req, transferred);
2416
2417         spin_lock(&cli->cl_loi_list_lock);
2418         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2419          * is called so we know whether to go to sync BRWs or wait for more
2420          * RPCs to complete */
2421         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2422                 cli->cl_w_in_flight--;
2423         else
2424                 cli->cl_r_in_flight--;
2425         osc_wake_cache_waiters(cli);
2426         spin_unlock(&cli->cl_loi_list_lock);
2427
2428         osc_io_unplug(env, cli, NULL);
2429         RETURN(rc);
2430 }
2431
2432 static void brw_commit(struct ptlrpc_request *req)
2433 {
2434         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2435          * this function, called via the rq_commit_cb, we need to ensure
2436          * osc_dec_unstable_pages is still called. Otherwise unstable
2437          * pages may be leaked. */
2438         spin_lock(&req->rq_lock);
2439         if (likely(req->rq_unstable)) {
2440                 req->rq_unstable = 0;
2441                 spin_unlock(&req->rq_lock);
2442
2443                 osc_dec_unstable_pages(req);
2444         } else {
2445                 req->rq_committed = 1;
2446                 spin_unlock(&req->rq_lock);
2447         }
2448 }
2449
2450 /**
2451  * Build an RPC from the list of extents @ext_list. The caller must ensure
2452  * that the total number of pages in this list does NOT exceed the max pages
2453  * per RPC. Extents in the list must be in OES_RPC state.
2454  */
2455 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2456                   struct list_head *ext_list, int cmd)
2457 {
2458         struct ptlrpc_request           *req = NULL;
2459         struct osc_extent               *ext;
2460         struct brw_page                 **pga = NULL;
2461         struct osc_brw_async_args       *aa = NULL;
2462         struct obdo                     *oa = NULL;
2463         struct osc_async_page           *oap;
2464         struct osc_object               *obj = NULL;
2465         struct cl_req_attr              *crattr = NULL;
2466         loff_t                          starting_offset = OBD_OBJECT_EOF;
2467         loff_t                          ending_offset = 0;
2468         /* '1' for consistency with code that checks !mpflag to restore */
2469         int mpflag = 1;
2470         int                             mem_tight = 0;
2471         int                             page_count = 0;
2472         bool                            soft_sync = false;
2473         bool                            ndelay = false;
2474         int                             i;
2475         int                             grant = 0;
2476         int                             rc;
2477         __u32                           layout_version = 0;
2478         LIST_HEAD(rpc_list);
2479         struct ost_body                 *body;
2480         ENTRY;
2481         LASSERT(!list_empty(ext_list));
2482
2483         /* add pages into rpc_list to build BRW rpc */
2484         list_for_each_entry(ext, ext_list, oe_link) {
2485                 LASSERT(ext->oe_state == OES_RPC);
2486                 mem_tight |= ext->oe_memalloc;
2487                 grant += ext->oe_grants;
2488                 page_count += ext->oe_nr_pages;
2489                 layout_version = max(layout_version, ext->oe_layout_version);
2490                 if (obj == NULL)
2491                         obj = ext->oe_obj;
2492         }
2493
2494         soft_sync = osc_over_unstable_soft_limit(cli);
2495         if (mem_tight)
2496                 mpflag = memalloc_noreclaim_save();
2497
2498         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2499         if (pga == NULL)
2500                 GOTO(out, rc = -ENOMEM);
2501
2502         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2503         if (oa == NULL)
2504                 GOTO(out, rc = -ENOMEM);
2505
2506         i = 0;
2507         list_for_each_entry(ext, ext_list, oe_link) {
2508                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2509                         if (mem_tight)
2510                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2511                         if (soft_sync)
2512                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2513                         pga[i] = &oap->oap_brw_page;
2514                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2515                         i++;
2516
2517                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2518                         if (starting_offset == OBD_OBJECT_EOF ||
2519                             starting_offset > oap->oap_obj_off)
2520                                 starting_offset = oap->oap_obj_off;
2521                         else
2522                                 LASSERT(oap->oap_page_off == 0);
2523                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2524                                 ending_offset = oap->oap_obj_off +
2525                                                 oap->oap_count;
2526                         else
2527                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2528                                         PAGE_SIZE);
2529                 }
2530                 if (ext->oe_ndelay)
2531                         ndelay = true;
2532         }
2533
2534         /* first page in the list */
2535         oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2536
2537         crattr = &osc_env_info(env)->oti_req_attr;
2538         memset(crattr, 0, sizeof(*crattr));
2539         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2540         crattr->cra_flags = ~0ULL;
2541         crattr->cra_page = oap2cl_page(oap);
2542         crattr->cra_oa = oa;
2543         cl_req_attr_set(env, osc2cl(obj), crattr);
2544
2545         if (cmd == OBD_BRW_WRITE) {
2546                 oa->o_grant_used = grant;
2547                 if (layout_version > 0) {
2548                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2549                                PFID(&oa->o_oi.oi_fid), layout_version);
2550
2551                         oa->o_layout_version = layout_version;
2552                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2553                 }
2554         }
2555
2556         sort_brw_pages(pga, page_count);
2557         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2558         if (rc != 0) {
2559                 CERROR("prep_req failed: %d\n", rc);
2560                 GOTO(out, rc);
2561         }
2562
2563         req->rq_commit_cb = brw_commit;
2564         req->rq_interpret_reply = brw_interpret;
2565         req->rq_memalloc = mem_tight != 0;
2566         oap->oap_request = ptlrpc_request_addref(req);
2567         if (ndelay) {
2568                 req->rq_no_resend = req->rq_no_delay = 1;
2569                 /* probably set a shorter timeout value here, to handle
2570                  * ETIMEDOUT in brw_interpret() correctly. */
2571                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2572         }
2573
2574         /* Need to update the timestamps after the request is built in case
2575          * we race with setattr (locally or in queue at the OST).  If the OST
2576          * gets a later setattr before an earlier BRW (as determined by the
2577          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2578          * is no obvious way to do this in a single call.  bug 10150 */
2579         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2580         crattr->cra_oa = &body->oa;
2581         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2582         cl_req_attr_set(env, osc2cl(obj), crattr);
2583         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2584
2585         aa = ptlrpc_req_async_args(aa, req);
2586         INIT_LIST_HEAD(&aa->aa_oaps);
2587         list_splice_init(&rpc_list, &aa->aa_oaps);
2588         INIT_LIST_HEAD(&aa->aa_exts);
2589         list_splice_init(ext_list, &aa->aa_exts);
2590
2591         spin_lock(&cli->cl_loi_list_lock);
2592         starting_offset >>= PAGE_SHIFT;
2593         if (cmd == OBD_BRW_READ) {
2594                 cli->cl_r_in_flight++;
2595                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2596                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2597                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2598                                       starting_offset + 1);
2599         } else {
2600                 cli->cl_w_in_flight++;
2601                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2602                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2603                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2604                                       starting_offset + 1);
2605         }
2606         spin_unlock(&cli->cl_loi_list_lock);
2607
2608         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2609                   page_count, aa, cli->cl_r_in_flight,
2610                   cli->cl_w_in_flight);
2611         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2612
2613         ptlrpcd_add_req(req);
2614         rc = 0;
2615         EXIT;
2616
2617 out:
2618         if (mem_tight)
2619                 memalloc_noreclaim_restore(mpflag);
2620
2621         if (rc != 0) {
2622                 LASSERT(req == NULL);
2623
2624                 if (oa)
2625                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2626                 if (pga) {
2627                         osc_release_bounce_pages(pga, page_count);
2628                         osc_release_ppga(pga, page_count);
2629                 }
2630                 /* This should happen rarely and is pretty bad; it makes
2631                  * the pending list stop following the dirty order. */
2632                 while (!list_empty(ext_list)) {
2633                         ext = list_entry(ext_list->next, struct osc_extent,
2634                                          oe_link);
2635                         list_del_init(&ext->oe_link);
2636                         osc_extent_finish(env, ext, 0, rc);
2637                 }
2638         }
2639         RETURN(rc);
2640 }
2641
2642 /* Refresh our DLM lock when there are no other RPCs to keep it active. */
2643 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2644 {
2645         struct ptlrpc_request *req;
2646         struct obdo oa;
2647         struct brw_page bpg = { .off = start, .count = 1};
2648         struct brw_page *pga = &bpg;
2649         int rc;
2650
2651         memset(&oa, 0, sizeof(oa));
2652         oa.o_oi = osc->oo_oinfo->loi_oi;
2653         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2654         /* For updated servers - don't do a read */
2655         oa.o_flags = OBD_FL_NORPC;
2656
2657         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2658                                   &req, 0);
2659
2660         /* If we succeeded, ship it off; if not, there is no point in
2661          * doing anything.  Also no resends, no interpret callback and
2662          * no commit callback.
2663          */
2664         if (!rc) {
2665                 req->rq_no_resend = 1;
2666                 ptlrpcd_add_req(req);
2667         }
2668 }
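
/*
 * A minimal usage sketch (illustrative only, excluded from the build):
 * a hypothetical caller that refreshes the DLM lock covering @start when
 * the object has produced no real I/O.  osc_object and
 * osc_send_empty_rpc() are real; the trigger policy and helper name are
 * assumptions.
 */
#if 0
static void example_refresh_lock(struct osc_object *osc, pgoff_t start)
{
        /* ships a one-byte OBD_BRW_READ marked OBD_FL_NORPC, so an
         * updated server touches the lock without doing any read */
        osc_send_empty_rpc(osc, start);
}
#endif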
2669
2670 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2671 {
2672         int set = 0;
2673
2674         LASSERT(lock != NULL);
2675
2676         lock_res_and_lock(lock);
2677
2678         if (lock->l_ast_data == NULL)
2679                 lock->l_ast_data = data;
2680         if (lock->l_ast_data == data)
2681                 set = 1;
2682
2683         unlock_res_and_lock(lock);
2684
2685         return set;
2686 }
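
/*
 * Illustrative sketch (excluded from the build) of the compare-and-set
 * contract of osc_set_lock_data(): the first caller to offer an
 * l_ast_data pointer wins, and later callers succeed only if they offer
 * the same pointer, so a lock is never rebound to a second osc_object.
 * The helper name is hypothetical.
 */
#if 0
static void example_bind_lock(struct ldlm_lock *lock, struct osc_object *obj)
{
        if (osc_set_lock_data(lock, obj)) {
                /* lock->l_ast_data == obj; the lock may be used for obj */
        } else {
                /* the lock is already bound to a different object */
        }
}
#endif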
2687
2688 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2689                      void *cookie, struct lustre_handle *lockh,
2690                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2691                      int errcode)
2692 {
2693         bool intent = *flags & LDLM_FL_HAS_INTENT;
2694         int rc;
2695         ENTRY;
2696
2697         /* The request was created before the ldlm_cli_enqueue() call. */
2698         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2699                 struct ldlm_reply *rep;
2700
2701                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2702                 LASSERT(rep != NULL);
2703
2704                 rep->lock_policy_res1 =
2705                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2706                 if (rep->lock_policy_res1)
2707                         errcode = rep->lock_policy_res1;
2708                 if (!speculative)
2709                         *flags |= LDLM_FL_LVB_READY;
2710         } else if (errcode == ELDLM_OK) {
2711                 *flags |= LDLM_FL_LVB_READY;
2712         }
2713
2714         /* Call the update callback. */
2715         rc = (*upcall)(cookie, lockh, errcode);
2716
2717         /* release the reference taken in ldlm_cli_enqueue() */
2718         if (errcode == ELDLM_LOCK_MATCHED)
2719                 errcode = ELDLM_OK;
2720         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2721                 ldlm_lock_decref(lockh, mode);
2722
2723         RETURN(rc);
2724 }
2725
2726 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2727                           void *args, int rc)
2728 {
2729         struct osc_enqueue_args *aa = args;
2730         struct ldlm_lock *lock;
2731         struct lustre_handle *lockh = &aa->oa_lockh;
2732         enum ldlm_mode mode = aa->oa_mode;
2733         struct ost_lvb *lvb = aa->oa_lvb;
2734         __u32 lvb_len = sizeof(*lvb);
2735         __u64 flags = 0;
2736         struct ldlm_enqueue_info einfo = {
2737                 .ei_type = aa->oa_type,
2738                 .ei_mode = mode,
2739         };
2740
2741         ENTRY;
2742
2743         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2744          * be valid. */
2745         lock = ldlm_handle2lock(lockh);
2746         LASSERTF(lock != NULL,
2747                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2748                  lockh->cookie, req, aa);
2749
2750         /* Take an additional reference so that a blocking AST that
2751          * ldlm_cli_enqueue_fini() might post for a failed lock is
2752          * guaranteed to arrive after the upcall has been executed by
2753          * osc_enqueue_fini(). */
2754         ldlm_lock_addref(lockh, mode);
2755
2756         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2757         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2758
2759         /* Let the CP AST grant the lock first. */
2760         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2761
2762         if (aa->oa_speculative) {
2763                 LASSERT(aa->oa_lvb == NULL);
2764                 LASSERT(aa->oa_flags == NULL);
2765                 aa->oa_flags = &flags;
2766         }
2767
2768         /* Complete obtaining the lock procedure. */
2769         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2770                                    lvb, lvb_len, lockh, rc);
2771         /* Complete osc stuff. */
2772         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2773                               aa->oa_flags, aa->oa_speculative, rc);
2774
2775         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2776
2777         ldlm_lock_decref(lockh, mode);
2778         LDLM_LOCK_PUT(lock);
2779         RETURN(rc);
2780 }
2781
2782 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2783  * from the 2nd OSC before a lock from the 1st one.  This does not deadlock
2784  * with other synchronous requests, but holding some locks while trying to
2785  * obtain others may take a considerable amount of time if an OST fails; and
2786  * if a client does not release a lock that other sync requests are waiting
2787  * for, the client is evicted from the cluster.  Such scenarios make life
2788  * difficult, so release locks just after they are obtained. */
2789 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2790                      __u64 *flags, union ldlm_policy_data *policy,
2791                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2792                      void *cookie, struct ldlm_enqueue_info *einfo,
2793                      struct ptlrpc_request_set *rqset, int async,
2794                      bool speculative)
2795 {
2796         struct obd_device *obd = exp->exp_obd;
2797         struct lustre_handle lockh = { 0 };
2798         struct ptlrpc_request *req = NULL;
2799         int intent = *flags & LDLM_FL_HAS_INTENT;
2800         __u64 match_flags = *flags;
2801         enum ldlm_mode mode;
2802         int rc;
2803         ENTRY;
2804
2805         /* Filesystem lock extents are extended to page boundaries so that
2806          * dealing with the page cache is a little smoother.  */
2807         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2808         policy->l_extent.end |= ~PAGE_MASK;
2809
2810         /* Next, search for already existing extent locks that will cover us */
2811         /* If we're trying to read, we also search for an existing PW lock.  The
2812          * VFS and page cache already protect us locally, so lots of readers/
2813          * writers can share a single PW lock.
2814          *
2815          * There are problems with conversion deadlocks, so instead of
2816          * converting a read lock to a write lock, we'll just enqueue a new
2817          * one.
2818          *
2819          * At some point we should cancel the read lock instead of making
2820          * the server send us a blocking callback, but there are problems
2821          * with canceling locks out from under other users right now, too. */
2822         mode = einfo->ei_mode;
2823         if (einfo->ei_mode == LCK_PR)
2824                 mode |= LCK_PW;
2825         /* Normal lock requests must wait for the LVB to be ready before
2826          * matching a lock; speculative lock requests do not need to,
2827          * because they will not actually use the lock. */
2828         if (!speculative)
2829                 match_flags |= LDLM_FL_LVB_READY;
2830         if (intent != 0)
2831                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2832         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2833                                einfo->ei_type, policy, mode, &lockh);
2834         if (mode) {
2835                 struct ldlm_lock *matched;
2836
2837                 if (*flags & LDLM_FL_TEST_LOCK)
2838                         RETURN(ELDLM_OK);
2839
2840                 matched = ldlm_handle2lock(&lockh);
2841                 if (speculative) {
2842                         /* This DLM lock request is speculative and has no
2843                          * associated IO request.  Therefore, if there
2844                          * is already a DLM lock, we just inform the
2845                          * caller to cancel the request for this stripe. */
2846                         lock_res_and_lock(matched);
2847                         if (ldlm_extent_equal(&policy->l_extent,
2848                             &matched->l_policy_data.l_extent))
2849                                 rc = -EEXIST;
2850                         else
2851                                 rc = -ECANCELED;
2852                         unlock_res_and_lock(matched);
2853
2854                         ldlm_lock_decref(&lockh, mode);
2855                         LDLM_LOCK_PUT(matched);
2856                         RETURN(rc);
2857                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2858                         *flags |= LDLM_FL_LVB_READY;
2859
2860                         /* We already have a lock, and it's referenced. */
2861                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2862
2863                         ldlm_lock_decref(&lockh, mode);
2864                         LDLM_LOCK_PUT(matched);
2865                         RETURN(ELDLM_OK);
2866                 } else {
2867                         ldlm_lock_decref(&lockh, mode);
2868                         LDLM_LOCK_PUT(matched);
2869                 }
2870         }
2871
2872         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2873                 RETURN(-ENOLCK);
2874
2875         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2876         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2877
2878         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2879                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2880         if (async) {
2881                 if (!rc) {
2882                         struct osc_enqueue_args *aa;
2883                         aa = ptlrpc_req_async_args(aa, req);
2884                         aa->oa_exp         = exp;
2885                         aa->oa_mode        = einfo->ei_mode;
2886                         aa->oa_type        = einfo->ei_type;
2887                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2888                         aa->oa_upcall      = upcall;
2889                         aa->oa_cookie      = cookie;
2890                         aa->oa_speculative = speculative;
2891                         if (!speculative) {
2892                                 aa->oa_flags  = flags;
2893                                 aa->oa_lvb    = lvb;
2894                         } else {
2895                                 /* speculative locks essentially enqueue
2896                                  * a DLM lock in advance, so we don't care
2897                                  * about the result of the enqueue. */
2898                                 aa->oa_lvb    = NULL;
2899                                 aa->oa_flags  = NULL;
2900                         }
2901
2902                         req->rq_interpret_reply = osc_enqueue_interpret;
2903                         ptlrpc_set_add_req(rqset, req);
2904                 }
2905                 RETURN(rc);
2906         }
2907
2908         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2909                               flags, speculative, rc);
2910
2911         RETURN(rc);
2912 }
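
/*
 * A hedged sketch (excluded from the build) of an enqueue upcall for
 * osc_enqueue_base(); the signature is taken from the
 * (*upcall)(cookie, lockh, errcode) call in osc_enqueue_fini() above.
 * Real callers install something like osc_lock_upcall() from osc_lock.c;
 * this hypothetical one only logs the outcome.
 */
#if 0
static int example_enqueue_upcall(void *cookie, struct lustre_handle *lockh,
                                  int errcode)
{
        CDEBUG(D_DLMTRACE, "enqueue upcall: cookie %p lock %#llx rc %d\n",
               cookie, lockh->cookie, errcode);
        return errcode;
}
#endif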
2913
2914 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2915                    struct ldlm_res_id *res_id, enum ldlm_type type,
2916                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2917                    __u64 *flags, struct osc_object *obj,
2918                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2919 {
2920         struct obd_device *obd = exp->exp_obd;
2921         __u64 lflags = *flags;
2922         enum ldlm_mode rc;
2923         ENTRY;
2924
2925         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2926                 RETURN(-EIO);
2927
2928         /* Filesystem lock extents are extended to page boundaries so that
2929          * dealing with the page cache is a little smoother */
2930         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2931         policy->l_extent.end |= ~PAGE_MASK;
2932
2933         /* Next, search for already existing extent locks that will cover us */
2934         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2935                                         res_id, type, policy, mode, lockh,
2936                                         match_flags);
2937         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2938                 RETURN(rc);
2939
2940         if (obj != NULL) {
2941                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2942
2943                 LASSERT(lock != NULL);
2944                 if (osc_set_lock_data(lock, obj)) {
2945                         lock_res_and_lock(lock);
2946                         if (!ldlm_is_lvb_cached(lock)) {
2947                                 LASSERT(lock->l_ast_data == obj);
2948                                 osc_lock_lvb_update(env, obj, lock, NULL);
2949                                 ldlm_set_lvb_cached(lock);
2950                         }
2951                         unlock_res_and_lock(lock);
2952                 } else {
2953                         ldlm_lock_decref(lockh, rc);
2954                         rc = 0;
2955                 }
2956                 LDLM_LOCK_PUT(lock);
2957         }
2958         RETURN(rc);
2959 }
2960
2961 static int osc_statfs_interpret(const struct lu_env *env,
2962                                 struct ptlrpc_request *req, void *args, int rc)
2963 {
2964         struct osc_async_args *aa = args;
2965         struct obd_statfs *msfs;
2966
2967         ENTRY;
2968         if (rc == -EBADR)
2969                 /*
2970                  * The request has in fact never been sent due to issues at
2971                  * a higher level (LOV).  Exit immediately since the caller
2972                  * is aware of the problem and takes care of the clean up.
2973                  */
2974                 RETURN(rc);
2975
2976         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2977             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2978                 GOTO(out, rc = 0);
2979
2980         if (rc != 0)
2981                 GOTO(out, rc);
2982
2983         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2984         if (msfs == NULL)
2985                 GOTO(out, rc = -EPROTO);
2986
2987         *aa->aa_oi->oi_osfs = *msfs;
2988 out:
2989         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2990
2991         RETURN(rc);
2992 }
2993
2994 static int osc_statfs_async(struct obd_export *exp,
2995                             struct obd_info *oinfo, time64_t max_age,
2996                             struct ptlrpc_request_set *rqset)
2997 {
2998         struct obd_device     *obd = class_exp2obd(exp);
2999         struct ptlrpc_request *req;
3000         struct osc_async_args *aa;
3001         int rc;
3002         ENTRY;
3003
3004         if (obd->obd_osfs_age >= max_age) {
3005                 CDEBUG(D_SUPER,
3006                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3007                        obd->obd_name, &obd->obd_osfs,
3008                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3009                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3010                 spin_lock(&obd->obd_osfs_lock);
3011                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3012                 spin_unlock(&obd->obd_osfs_lock);
3013                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3014                 if (oinfo->oi_cb_up)
3015                         oinfo->oi_cb_up(oinfo, 0);
3016
3017                 RETURN(0);
3018         }
3019
3020         /* We could possibly pass max_age in the request (as an absolute
3021          * timestamp or a "seconds.usec ago") so the target can avoid doing
3022          * extra calls into the filesystem if that isn't necessary (e.g.
3023          * during mount, where that would help a bit).  Relative timestamps
3024          * are not so great if request processing is slow, while absolute
3025          * timestamps are not ideal because they need time synchronization. */
3026         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3027         if (req == NULL)
3028                 RETURN(-ENOMEM);
3029
3030         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3031         if (rc) {
3032                 ptlrpc_request_free(req);
3033                 RETURN(rc);
3034         }
3035         ptlrpc_request_set_replen(req);
3036         req->rq_request_portal = OST_CREATE_PORTAL;
3037         ptlrpc_at_set_req_timeout(req);
3038
3039         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3040                 /* procfs requests must not wait or resend, to avoid deadlocks */
3041                 req->rq_no_resend = 1;
3042                 req->rq_no_delay = 1;
3043         }
3044
3045         req->rq_interpret_reply = osc_statfs_interpret;
3046         aa = ptlrpc_req_async_args(aa, req);
3047         aa->aa_oi = oinfo;
3048
3049         ptlrpc_set_add_req(rqset, req);
3050         RETURN(0);
3051 }
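
/*
 * Usage sketch (excluded from the build, helper name hypothetical):
 * statfs data newer than max_age is served from obd_osfs without an RPC,
 * so passing ktime_get_seconds() - 60 accepts cached data up to a minute
 * old.  The caller is assumed to have filled oinfo->oi_osfs and
 * oinfo->oi_cb_up.
 */
#if 0
static int example_statfs_async(struct obd_export *exp,
                                struct obd_info *oinfo,
                                struct ptlrpc_request_set *rqset)
{
        return osc_statfs_async(exp, oinfo, ktime_get_seconds() - 60, rqset);
}
#endif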
3052
3053 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3054                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3055 {
3056         struct obd_device     *obd = class_exp2obd(exp);
3057         struct obd_statfs     *msfs;
3058         struct ptlrpc_request *req;
3059         struct obd_import     *imp, *imp0;
3060         int rc;
3061         ENTRY;
3062
3063         /* Since the request might also come from lprocfs, we need to
3064          * sync this with client_disconnect_export(); see Bug 15684.
3065          */
3066         with_imp_locked(obd, imp0, rc)
3067                 imp = class_import_get(imp0);
3068         if (rc)
3069                 RETURN(rc);
3070
3071         /* We could possibly pass max_age in the request (as an absolute
3072          * timestamp or a "seconds.usec ago") so the target can avoid doing
3073          * extra calls into the filesystem if that isn't necessary (e.g.
3074          * during mount, where that would help a bit).  Relative timestamps
3075          * are not so great if request processing is slow, while absolute
3076          * timestamps are not ideal because they need time synchronization. */
3077         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3078
3079         class_import_put(imp);
3080
3081         if (req == NULL)
3082                 RETURN(-ENOMEM);
3083
3084         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3085         if (rc) {
3086                 ptlrpc_request_free(req);
3087                 RETURN(rc);
3088         }
3089         ptlrpc_request_set_replen(req);
3090         req->rq_request_portal = OST_CREATE_PORTAL;
3091         ptlrpc_at_set_req_timeout(req);
3092
3093         if (flags & OBD_STATFS_NODELAY) {
3094                 /* procfs requests must not wait or resend, to avoid deadlocks */
3095                 req->rq_no_resend = 1;
3096                 req->rq_no_delay = 1;
3097         }
3098
3099         rc = ptlrpc_queue_wait(req);
3100         if (rc)
3101                 GOTO(out, rc);
3102
3103         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3104         if (msfs == NULL)
3105                 GOTO(out, rc = -EPROTO);
3106
3107         *osfs = *msfs;
3108
3109         EXIT;
3110 out:
3111         ptlrpc_req_finished(req);
3112         return rc;
3113 }
3114
3115 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3116                          void *karg, void __user *uarg)
3117 {
3118         struct obd_device *obd = exp->exp_obd;
3119         struct obd_ioctl_data *data = karg;
3120         int rc = 0;
3121
3122         ENTRY;
3123         if (!try_module_get(THIS_MODULE)) {
3124                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3125                        module_name(THIS_MODULE));
3126                 return -EINVAL;
3127         }
3128         switch (cmd) {
3129         case OBD_IOC_CLIENT_RECOVER:
3130                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3131                                            data->ioc_inlbuf1, 0);
3132                 if (rc > 0)
3133                         rc = 0;
3134                 break;
3135         case IOC_OSC_SET_ACTIVE:
3136                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3137                                               data->ioc_offset);
3138                 break;
3139         default:
3140                 rc = -ENOTTY;
3141                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3142                        obd->obd_name, cmd, current->comm, rc);
3143                 break;
3144         }
3145
3146         module_put(THIS_MODULE);
3147         return rc;
3148 }
3149
3150 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3151                        u32 keylen, void *key, u32 vallen, void *val,
3152                        struct ptlrpc_request_set *set)
3153 {
3154         struct ptlrpc_request *req;
3155         struct obd_device     *obd = exp->exp_obd;
3156         struct obd_import     *imp = class_exp2cliimp(exp);
3157         char                  *tmp;
3158         int                    rc;
3159         ENTRY;
3160
3161         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3162
3163         if (KEY_IS(KEY_CHECKSUM)) {
3164                 if (vallen != sizeof(int))
3165                         RETURN(-EINVAL);
3166                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3167                 RETURN(0);
3168         }
3169
3170         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3171                 sptlrpc_conf_client_adapt(obd);
3172                 RETURN(0);
3173         }
3174
3175         if (KEY_IS(KEY_FLUSH_CTX)) {
3176                 sptlrpc_import_flush_my_ctx(imp);
3177                 RETURN(0);
3178         }
3179
3180         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3181                 struct client_obd *cli = &obd->u.cli;
3182                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3183                 long target = *(long *)val;
3184
3185                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3186                 *(long *)val -= nr;
3187                 RETURN(0);
3188         }
3189
3190         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3191                 RETURN(-EINVAL);
3192
3193         /* We pass all other commands directly to the OST.  Since nobody
3194            calls osc methods directly and everybody is supposed to go
3195            through LOV, we assume LOV has checked the values for us.
3196            The only recognised values so far are evict_by_nid and mds_conn.
3197            Even if something bad goes through, we'd get a -EINVAL from the
3198            OST anyway. */
3199
3200         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3201                                                 &RQF_OST_SET_GRANT_INFO :
3202                                                 &RQF_OBD_SET_INFO);
3203         if (req == NULL)
3204                 RETURN(-ENOMEM);
3205
3206         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3207                              RCL_CLIENT, keylen);
3208         if (!KEY_IS(KEY_GRANT_SHRINK))
3209                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3210                                      RCL_CLIENT, vallen);
3211         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3212         if (rc) {
3213                 ptlrpc_request_free(req);
3214                 RETURN(rc);
3215         }
3216
3217         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3218         memcpy(tmp, key, keylen);
3219         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3220                                                         &RMF_OST_BODY :
3221                                                         &RMF_SETINFO_VAL);
3222         memcpy(tmp, val, vallen);
3223
3224         if (KEY_IS(KEY_GRANT_SHRINK)) {
3225                 struct osc_grant_args *aa;
3226                 struct obdo *oa;
3227
3228                 aa = ptlrpc_req_async_args(aa, req);
3229                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3230                 if (!oa) {
3231                         ptlrpc_req_finished(req);
3232                         RETURN(-ENOMEM);
3233                 }
3234                 *oa = ((struct ost_body *)val)->oa;
3235                 aa->aa_oa = oa;
3236                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3237         }
3238
3239         ptlrpc_request_set_replen(req);
3240         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3241                 LASSERT(set != NULL);
3242                 ptlrpc_set_add_req(set, req);
3243                 ptlrpc_check_set(NULL, set);
3244         } else {
3245                 ptlrpcd_add_req(req);
3246         }
3247
3248         RETURN(0);
3249 }
3250 EXPORT_SYMBOL(osc_set_info_async);
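
/*
 * Illustrative call (excluded from the build, helper name hypothetical):
 * enabling wire checksums through the KEY_CHECKSUM branch above.  That
 * branch returns before any RPC is packed, so passing a NULL request set
 * is safe here; the value must be an int, as the vallen check requires.
 */
#if 0
static int example_enable_checksum(const struct lu_env *env,
                                   struct obd_export *exp)
{
        int on = 1;

        return osc_set_info_async(env, exp, sizeof(KEY_CHECKSUM),
                                  KEY_CHECKSUM, sizeof(on), &on, NULL);
}
#endif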
3251
3252 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3253                   struct obd_device *obd, struct obd_uuid *cluuid,
3254                   struct obd_connect_data *data, void *localdata)
3255 {
3256         struct client_obd *cli = &obd->u.cli;
3257
3258         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3259                 long lost_grant;
3260                 long grant;
3261
3262                 spin_lock(&cli->cl_loi_list_lock);
3263                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3264                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3265                         /* restore ocd_grant_blkbits as client page bits */
3266                         data->ocd_grant_blkbits = PAGE_SHIFT;
3267                         grant += cli->cl_dirty_grant;
3268                 } else {
3269                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3270                 }
3271                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3272                 lost_grant = cli->cl_lost_grant;
3273                 cli->cl_lost_grant = 0;
3274                 spin_unlock(&cli->cl_loi_list_lock);
3275
3276                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3277                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3278                        data->ocd_version, data->ocd_grant, lost_grant);
3279         }
3280
3281         RETURN(0);
3282 }
3283 EXPORT_SYMBOL(osc_reconnect);
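
/*
 * Worked example with assumed numbers: a reconnecting client with
 * cl_avail_grant = 4 MiB, cl_reserved_grant = 1 MiB and 256 dirty 4 KiB
 * pages (no OBD_CONNECT_GRANT_PARAM) asks for
 * ocd_grant = 4 MiB + 1 MiB + 256 * 4 KiB = 6 MiB; if the sum is zero it
 * falls back to 2 * cli_brw_size(obd).
 */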
3284
3285 int osc_disconnect(struct obd_export *exp)
3286 {
3287         struct obd_device *obd = class_exp2obd(exp);
3288         int rc;
3289
3290         rc = client_disconnect_export(exp);
3291         /**
3292          * Initially we put del_shrink_grant before disconnect_export, but it
3293          * causes the following problem if setup (connect) and cleanup
3294          * (disconnect) are tangled together.
3295          *      connect p1                     disconnect p2
3296          *   ptlrpc_connect_import
3297          *     ...............               class_manual_cleanup
3298          *                                     osc_disconnect
3299          *                                     del_shrink_grant
3300          *   ptlrpc_connect_interrupt
3301          *     osc_init_grant
3302          *   add this client to shrink list
3303          *                                      cleanup_osc
3304          * Bang! the grant shrink thread triggers the shrink. BUG18662
3305          */
3306         osc_del_grant_list(&obd->u.cli);
3307         return rc;
3308 }
3309 EXPORT_SYMBOL(osc_disconnect);
3310
3311 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3312                                  struct hlist_node *hnode, void *arg)
3313 {
3314         struct lu_env *env = arg;
3315         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3316         struct ldlm_lock *lock;
3317         struct osc_object *osc = NULL;
3318         ENTRY;
3319
3320         lock_res(res);
3321         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3322                 if (lock->l_ast_data != NULL && osc == NULL) {
3323                         osc = lock->l_ast_data;
3324                         cl_object_get(osc2cl(osc));
3325                 }
3326
3327                 /* Clear the LDLM_FL_CLEANED flag to make sure the lock
3328                  * will be canceled by the 2nd round of the
3329                  * ldlm_namespace_cleanup() call in osc_import_event(). */
3330                 ldlm_clear_cleaned(lock);
3331         }
3332         unlock_res(res);
3333
3334         if (osc != NULL) {
3335                 osc_object_invalidate(env, osc);
3336                 cl_object_put(env, osc2cl(osc));
3337         }
3338
3339         RETURN(0);
3340 }
3341 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3342
3343 static int osc_import_event(struct obd_device *obd,
3344                             struct obd_import *imp,
3345                             enum obd_import_event event)
3346 {
3347         struct client_obd *cli;
3348         int rc = 0;
3349
3350         ENTRY;
3351         LASSERT(imp->imp_obd == obd);
3352
3353         switch (event) {
3354         case IMP_EVENT_DISCON: {
3355                 cli = &obd->u.cli;
3356                 spin_lock(&cli->cl_loi_list_lock);
3357                 cli->cl_avail_grant = 0;
3358                 cli->cl_lost_grant = 0;
3359                 spin_unlock(&cli->cl_loi_list_lock);
3360                 break;
3361         }
3362         case IMP_EVENT_INACTIVE: {
3363                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3364                 break;
3365         }
3366         case IMP_EVENT_INVALIDATE: {
3367                 struct ldlm_namespace *ns = obd->obd_namespace;
3368                 struct lu_env         *env;
3369                 __u16                  refcheck;
3370
3371                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3372
3373                 env = cl_env_get(&refcheck);
3374                 if (!IS_ERR(env)) {
3375                         osc_io_unplug(env, &obd->u.cli, NULL);
3376
3377                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3378                                                  osc_ldlm_resource_invalidate,
3379                                                  env, 0);
3380                         cl_env_put(env, &refcheck);
3381
3382                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3383                 } else
3384                         rc = PTR_ERR(env);
3385                 break;
3386         }
3387         case IMP_EVENT_ACTIVE: {
3388                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3389                 break;
3390         }
3391         case IMP_EVENT_OCD: {
3392                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3393
3394                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3395                         osc_init_grant(&obd->u.cli, ocd);
3396
3397                 /* See bug 7198 */
3398                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3399                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3400
3401                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3402                 break;
3403         }
3404         case IMP_EVENT_DEACTIVATE: {
3405                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3406                 break;
3407         }
3408         case IMP_EVENT_ACTIVATE: {
3409                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3410                 break;
3411         }
3412         default:
3413                 CERROR("Unknown import event %d\n", event);
3414                 LBUG();
3415         }
3416         RETURN(rc);
3417 }
3418
3419 /**
3420  * Determine whether the lock can be canceled before replaying the lock
3421  * during recovery, see bug16774 for detailed information.
3422  *
3423  * \retval zero the lock can't be canceled
3424  * \retval other ok to cancel
3425  */
3426 static int osc_cancel_weight(struct ldlm_lock *lock)
3427 {
3428         /*
3429          * Cancel all unused and granted extent locks.
3430          */
3431         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3432             ldlm_is_granted(lock) &&
3433             osc_ldlm_weigh_ast(lock) == 0)
3434                 RETURN(1);
3435
3436         RETURN(0);
3437 }
3438
3439 static int brw_queue_work(const struct lu_env *env, void *data)
3440 {
3441         struct client_obd *cli = data;
3442
3443         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3444
3445         osc_io_unplug(env, cli, NULL);
3446         RETURN(0);
3447 }
3448
3449 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3450 {
3451         struct client_obd *cli = &obd->u.cli;
3452         void *handler;
3453         int rc;
3454
3455         ENTRY;
3456
3457         rc = ptlrpcd_addref();
3458         if (rc)
3459                 RETURN(rc);
3460
3461         rc = client_obd_setup(obd, lcfg);
3462         if (rc)
3463                 GOTO(out_ptlrpcd, rc);
3464
3465
3466         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3467         if (IS_ERR(handler))
3468                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3469         cli->cl_writeback_work = handler;
3470
3471         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3472         if (IS_ERR(handler))
3473                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3474         cli->cl_lru_work = handler;
3475
3476         rc = osc_quota_setup(obd);
3477         if (rc)
3478                 GOTO(out_ptlrpcd_work, rc);
3479
3480         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3481         osc_update_next_shrink(cli);
3482
3483         RETURN(rc);
3484
3485 out_ptlrpcd_work:
3486         if (cli->cl_writeback_work != NULL) {
3487                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3488                 cli->cl_writeback_work = NULL;
3489         }
3490         if (cli->cl_lru_work != NULL) {
3491                 ptlrpcd_destroy_work(cli->cl_lru_work);
3492                 cli->cl_lru_work = NULL;
3493         }
3494         client_obd_cleanup(obd);
3495 out_ptlrpcd:
3496         ptlrpcd_decref();
3497         RETURN(rc);
3498 }
3499 EXPORT_SYMBOL(osc_setup_common);
3500
3501 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3502 {
3503         struct client_obd *cli = &obd->u.cli;
3504         int                adding;
3505         int                added;
3506         int                req_count;
3507         int                rc;
3508
3509         ENTRY;
3510
3511         rc = osc_setup_common(obd, lcfg);
3512         if (rc < 0)
3513                 RETURN(rc);
3514
3515         rc = osc_tunables_init(obd);
3516         if (rc)
3517                 RETURN(rc);
3518
3519         /*
3520          * We try to control the total number of requests with an upper
3521          * limit, osc_reqpool_maxreqcount.  There might be some race which
3522          * causes over-limit allocation, but it is fine.
3523          */
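        /* Worked example with assumed values: if osc_reqpool_maxreqcount
         * is 1280, req_count is 1275 and cl_max_rpcs_in_flight is 8, then
         * adding starts at 8 + 2 = 10 and is clamped to 1280 - 1275 = 5. */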
3524         req_count = atomic_read(&osc_pool_req_count);
3525         if (req_count < osc_reqpool_maxreqcount) {
3526                 adding = cli->cl_max_rpcs_in_flight + 2;
3527                 if (req_count + adding > osc_reqpool_maxreqcount)
3528                         adding = osc_reqpool_maxreqcount - req_count;
3529
3530                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3531                 atomic_add(added, &osc_pool_req_count);
3532         }
3533
3534         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3535
3536         spin_lock(&osc_shrink_lock);
3537         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3538         spin_unlock(&osc_shrink_lock);
3539         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3540         cli->cl_import->imp_idle_debug = D_HA;
3541
3542         RETURN(0);
3543 }
3544
3545 int osc_precleanup_common(struct obd_device *obd)
3546 {
3547         struct client_obd *cli = &obd->u.cli;
3548         ENTRY;
3549
3550         /* LU-464
3551          * for echo client, export may be on zombie list, wait for
3552          * zombie thread to cull it, because cli.cl_import will be
3553          * cleared in client_disconnect_export():
3554          *   class_export_destroy() -> obd_cleanup() ->
3555          *   echo_device_free() -> echo_client_cleanup() ->
3556          *   obd_disconnect() -> osc_disconnect() ->
3557          *   client_disconnect_export()
3558          */
3559         obd_zombie_barrier();
3560         if (cli->cl_writeback_work) {
3561                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3562                 cli->cl_writeback_work = NULL;
3563         }
3564
3565         if (cli->cl_lru_work) {
3566                 ptlrpcd_destroy_work(cli->cl_lru_work);
3567                 cli->cl_lru_work = NULL;
3568         }
3569
3570         obd_cleanup_client_import(obd);
3571         RETURN(0);
3572 }
3573 EXPORT_SYMBOL(osc_precleanup_common);
3574
3575 static int osc_precleanup(struct obd_device *obd)
3576 {
3577         ENTRY;
3578
3579         osc_precleanup_common(obd);
3580
3581         ptlrpc_lprocfs_unregister_obd(obd);
3582         RETURN(0);
3583 }
3584
3585 int osc_cleanup_common(struct obd_device *obd)
3586 {
3587         struct client_obd *cli = &obd->u.cli;
3588         int rc;
3589
3590         ENTRY;
3591
3592         spin_lock(&osc_shrink_lock);
3593         list_del(&cli->cl_shrink_list);
3594         spin_unlock(&osc_shrink_lock);
3595
3596         /* lru cleanup */
3597         if (cli->cl_cache != NULL) {
3598                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3599                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3600                 list_del_init(&cli->cl_lru_osc);
3601                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3602                 cli->cl_lru_left = NULL;
3603                 cl_cache_decref(cli->cl_cache);
3604                 cli->cl_cache = NULL;
3605         }
3606
3607         /* free memory of osc quota cache */
3608         osc_quota_cleanup(obd);
3609
3610         rc = client_obd_cleanup(obd);
3611
3612         ptlrpcd_decref();
3613         RETURN(rc);
3614 }
3615 EXPORT_SYMBOL(osc_cleanup_common);
3616
3617 static const struct obd_ops osc_obd_ops = {
3618         .o_owner                = THIS_MODULE,
3619         .o_setup                = osc_setup,
3620         .o_precleanup           = osc_precleanup,
3621         .o_cleanup              = osc_cleanup_common,
3622         .o_add_conn             = client_import_add_conn,
3623         .o_del_conn             = client_import_del_conn,
3624         .o_connect              = client_connect_import,
3625         .o_reconnect            = osc_reconnect,
3626         .o_disconnect           = osc_disconnect,
3627         .o_statfs               = osc_statfs,
3628         .o_statfs_async         = osc_statfs_async,
3629         .o_create               = osc_create,
3630         .o_destroy              = osc_destroy,
3631         .o_getattr              = osc_getattr,
3632         .o_setattr              = osc_setattr,
3633         .o_iocontrol            = osc_iocontrol,
3634         .o_set_info_async       = osc_set_info_async,
3635         .o_import_event         = osc_import_event,
3636         .o_quotactl             = osc_quotactl,
3637 };
3638
3639 LIST_HEAD(osc_shrink_list);
3640 DEFINE_SPINLOCK(osc_shrink_lock);
3641
3642 #ifdef HAVE_SHRINKER_COUNT
3643 static struct shrinker osc_cache_shrinker = {
3644         .count_objects  = osc_cache_shrink_count,
3645         .scan_objects   = osc_cache_shrink_scan,
3646         .seeks          = DEFAULT_SEEKS,
3647 };
3648 #else
3649 static int osc_cache_shrink(struct shrinker *shrinker,
3650                             struct shrink_control *sc)
3651 {
3652         (void)osc_cache_shrink_scan(shrinker, sc);
3653
3654         return osc_cache_shrink_count(shrinker, sc);
3655 }
3656
3657 static struct shrinker osc_cache_shrinker = {
3658         .shrink   = osc_cache_shrink,
3659         .seeks    = DEFAULT_SEEKS,
3660 };
3661 #endif
3662
3663 static int __init osc_init(void)
3664 {
3665         unsigned int reqpool_size;
3666         unsigned int reqsize;
3667         int rc;
3668         ENTRY;
3669
3670         /* Print the address of _any_ initialized kernel symbol from this
3671          * module, to allow debugging with a gdb that doesn't support data
3672          * symbols from modules. */
3673         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3674
3675         rc = lu_kmem_init(osc_caches);
3676         if (rc)
3677                 RETURN(rc);
3678
3679         rc = class_register_type(&osc_obd_ops, NULL, true,
3680                                  LUSTRE_OSC_NAME, &osc_device_type);
3681         if (rc)
3682                 GOTO(out_kmem, rc);
3683
3684         rc = register_shrinker(&osc_cache_shrinker);
3685         if (rc)
3686                 GOTO(out_type, rc);
3687
3688         /* This is obviously too much memory; we only prevent overflow here */
3689         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3690                 GOTO(out_shrinker, rc = -EINVAL);
3691
3692         reqpool_size = osc_reqpool_mem_max << 20;
3693
3694         reqsize = 1;
3695         while (reqsize < OST_IO_MAXREQSIZE)
3696                 reqsize = reqsize << 1;
3697
3698         /*
3699          * We don't enlarge the request count in OSC pool according to
3700          * cl_max_rpcs_in_flight. The allocation from the pool will only be
3701          * tried after normal allocation fails.  So a small OSC pool won't
3702          * cause much performance degradation in most cases.
3703          */
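        /* Example with assumed sizes: the default osc_reqpool_mem_max of
         * 5 MB with an OST_IO_MAXREQSIZE that rounds up to (say) 1 MiB
         * caps the pool at 5 requests. */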
3704         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3705
3706         atomic_set(&osc_pool_req_count, 0);
3707         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3708                                           ptlrpc_add_rqs_to_pool);
3709
3710         if (osc_rq_pool == NULL)
3711                 GOTO(out_shrinker, rc = -ENOMEM);
3712
3713         rc = osc_start_grant_work();
3714         if (rc != 0)
3715                 GOTO(out_req_pool, rc);
3716
3717         RETURN(rc);
3718
3719 out_req_pool:
3720         ptlrpc_free_rq_pool(osc_rq_pool);
3721 out_shrinker:
3722         unregister_shrinker(&osc_cache_shrinker);
3723 out_type:
3724         class_unregister_type(LUSTRE_OSC_NAME);
3725 out_kmem:
3726         lu_kmem_fini(osc_caches);
3727
3728         RETURN(rc);
3729 }
3730
3731 static void __exit osc_exit(void)
3732 {
3733         osc_stop_grant_work();
3734         unregister_shrinker(&osc_cache_shrinker);
3735         class_unregister_type(LUSTRE_OSC_NAME);
3736         lu_kmem_fini(osc_caches);
3737         ptlrpc_free_rq_pool(osc_rq_pool);
3738 }
3739
3740 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3741 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3742 MODULE_VERSION(LUSTRE_VERSION_STRING);
3743 MODULE_LICENSE("GPL");
3744
3745 module_init(osc_init);
3746 module_exit(osc_exit);