LU-1305 osd: osd-zfs to use correct objset name
fs/lustre-release.git: lustre/osd-zfs/osd_io.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2011, 2012 Whamcloud, Inc.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osd-zfs/osd_io.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSD

#include <lustre_ver.h>
#include <libcfs/libcfs.h>
#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
#include <sys/txg.h>

static char *osd_zerocopy_tag = "zerocopy";

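/*
 * Read up to buf->lb_len bytes from the object at *pos.  Reads are clamped
 * to the cached on-disk size (oo_attr.la_size), so a read at or past EOF
 * returns 0.  On success the number of bytes read is returned and *pos is
 * advanced.
 */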
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos,
                        struct lustre_capa *capa)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        uint64_t           old_size;
        int                size = buf->lb_len;
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        cfs_read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
        cfs_read_unlock(&obj->oo_attr_lock);

        if (*pos + size > old_size) {
                if (old_size < *pos)
                        return 0;
                else
                        size = old_size - *pos;
        }

        rc = -dmu_read(osd->od_objset.os, obj->oo_db->db_object, *pos, size,
                        buf->lb_buf, DMU_READ_PREFETCH);
        if (rc == 0) {
                rc = size;
                *pos += size;

                /* XXX: workaround for a bug in HEAD: fsfilt_ldiskfs_read()
                 * returns the requested number of bytes, not the number
                 * actually read */
                if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr))
                        rc = buf->lb_len;
        }
        return rc;
}

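/*
 * Reserve transaction credits for a forthcoming osd_write(): the data write
 * itself plus the SA (size attribute) update.  For a not-yet-created object
 * the write is declared against DMU_NEW_OBJECT.
 */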
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                const loff_t size, loff_t pos,
                                struct thandle *th)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_thandle *oh;
        uint64_t            oid;
        ENTRY;

        oh = container_of0(th, struct osd_thandle, ot_super);

        /* in some cases declare can race with creation (e.g. llog)
         * and we need to wait until the object is initialized. note that
         * LOHA_EXISTS is supposed to be the last step of the
         * initialization */

        /* declare a possible size change. note that we can't check the
         * current size here as another thread can change it */

        if (dt_object_exists(dt)) {
                LASSERT(obj->oo_db);
                oid = obj->oo_db->db_object;

                dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
        } else {
                oid = DMU_NEW_OBJECT;
                dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
        }

        dmu_tx_hold_write(oh->ot_tx, oid, pos, size);

        RETURN(0);
}

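/*
 * Buffered write: copy buf into the DMU with dmu_write() inside the caller's
 * transaction, then extend the cached file size under oo_attr_lock and push
 * the new size to the ZPL SIZE system attribute if the write went past EOF.
 */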
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                        const struct lu_buf *buf, loff_t *pos,
                        struct thandle *th, struct lustre_capa *capa,
                        int ignore_quota)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            offset = *pos;
        int                 rc;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
                (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
        cfs_write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
                cfs_write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from oo_attr
                 * into dbuf.  any update within a single txg will copy the
                 * most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                        &obj->oo_attr.la_size, 8, oh);
                if (unlikely(rc))
                        GOTO(out, rc);
        } else {
                cfs_write_unlock(&obj->oo_attr_lock);
        }

        *pos += buf->lb_len;
        rc = buf->lb_len;

out:
        RETURN(rc);
}

/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *      purposes as it's not very well defined ...
 *      instead I use the lowest bit of the address so that:
 *        arc buffer:  .lnb_obj = abuf          (arc we loan for write)
 *        dbuf buffer: .lnb_obj = dbuf | 1      (dbuf we get for read)
 *        copy buffer: .lnb_page->mapping = obj (page we allocate for write)
 *
 *      bzzz, to blame
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        unsigned long      ptr;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                if (lnb[i].page == NULL)
                        continue;
                if (lnb[i].page->mapping == (void *)obj) {
                        /* this is an anonymous page allocated for copy-write */
                        lnb[i].page->mapping = NULL;
                        __free_page(lnb[i].page);
                        cfs_atomic_dec(&osd->od_zerocopy_alloc);
                } else {
                        /* see comment in osd_bufs_get_read() */
                        ptr = (unsigned long)lnb[i].dentry;
                        if (ptr & 1UL) {
                                ptr &= ~1UL;
                                dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
                                cfs_atomic_dec(&osd->od_zerocopy_pin);
                        } else if (lnb[i].dentry != NULL) {
                                dmu_return_arcbuf((void *)lnb[i].dentry);
                                cfs_atomic_dec(&osd->od_zerocopy_loan);
                        }
                }
                lnb[i].page = NULL;
                lnb[i].dentry = NULL;
        }

        return 0;
}

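/*
 * Translate a kernel virtual address into its backing struct page, handling
 * both vmalloc'ed and linearly-mapped memory, since the dbuf/arcbuf data
 * passed in below may live in either region.
 */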
static struct page *kmem_to_page(void *addr)
{
        struct page *page;

        if (kmem_virt(addr))
                page = vmalloc_to_page(addr);
        else
                page = virt_to_page(addr);

        return page;
}

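/*
 * Prepare buffers for a bulk read: hold the dbufs covering [off, off + len)
 * and expose each page of dbuf data as a niobuf_local.  The dbuf pointer is
 * stored in lnb->dentry with its lowest bit set (see the comment above
 * osd_bufs_put()) so the hold can be dropped exactly once on put.
 * Returns the number of local niobufs filled in.
 */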
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                                loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        dmu_buf_t        **dbp;
        int                rc, i, numbufs, npages = 0;
        ENTRY;

        /* grab buffers for read:
         * the OSD API lets us grab the buffers first and then initiate the
         * IO(s), so that all required IOs can be done in parallel, but at
         * the moment the DMU doesn't provide a method to grab buffers
         * without starting the read.  If this turns out to be vital for
         * good performance we can implement our own replacement for
         * dmu_buf_hold_array_by_bonus().
         */
        while (len > 0) {
                rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
                                                osd_zerocopy_tag, &numbufs,
                                                &dbp);
                LASSERT(rc == 0);

                for (i = 0; i < numbufs; i++) {
                        int bufoff, tocpy, thispage;
                        void *dbf = dbp[i];

                        LASSERT(len > 0);

                        cfs_atomic_inc(&osd->od_zerocopy_pin);

                        bufoff = off - dbp[i]->db_offset;
                        tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

                        /* kind of a trick to differentiate dbuf vs. arcbuf */
                        LASSERT(((unsigned long)dbp[i] & 1) == 0);
                        dbf = (void *) ((unsigned long)dbp[i] | 1);

                        while (tocpy > 0) {
                                thispage = CFS_PAGE_SIZE;
                                thispage -= bufoff & (CFS_PAGE_SIZE - 1);
                                thispage = min(tocpy, thispage);

                                lnb->rc = 0;
                                lnb->lnb_file_offset = off;
                                lnb->offset = bufoff & ~CFS_PAGE_MASK;
                                lnb->len = thispage;
                                lnb->page = kmem_to_page(dbp[i]->db_data +
                                                                bufoff);
                                /* mark just a single slot: we need this
                                 * reference to the dbuf to be released only
                                 * once */
                                lnb->dentry = dbf;
                                dbf = NULL;

                                tocpy -= thispage;
                                len -= thispage;
                                bufoff += thispage;
                                off += thispage;

                                npages++;
                                lnb++;
                        }

                        /* steal the dbuf so dmu_buf_rele_array() can't
                         * release it */
                        dbp[i] = NULL;
                }

                dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
        }

        RETURN(npages);
}

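/*
 * Prepare buffers for a bulk write.  Full blocks are served from loaned ARC
 * buffers (zerocopy path); partial blocks fall back to temporarily allocated
 * pages that osd_write_commit() will copy with dmu_write().
 * Returns the number of local niobufs filled in, or -ENOMEM on failure.
 */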
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
                                loff_t off, ssize_t len, struct niobuf_local *lnb)
{
        struct osd_device *osd = osd_obj2dev(obj);
        int                plen, off_in_block, sz_in_block;
        int                i = 0, npages = 0;
        arc_buf_t         *abuf;
        uint32_t           bs;
        uint64_t           dummy;
        ENTRY;

        dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

        /*
         * currently only full blocks go through the zerocopy path, so that
         * we can be sure nobody else is trying to update the same block
         */
        while (len > 0) {
                LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

                off_in_block = off & (bs - 1);
                sz_in_block = min_t(int, bs - off_in_block, len);

                if (sz_in_block == bs) {
                        /* full block, try to use zerocopy */

                        abuf = dmu_request_arcbuf(obj->oo_db, bs);
                        if (unlikely(abuf == NULL))
                                GOTO(out_err, -ENOMEM);

                        cfs_atomic_inc(&osd->od_zerocopy_loan);

                        /* go over the pages the arcbuf contains and expose
                         * them as local niobufs for ptlrpc's bulks */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                if (sz_in_block == bs)
                                        lnb[i].dentry = (void *)abuf;
                                else
                                        lnb[i].dentry = NULL;

                                /* this one is not supposed to fail */
                                lnb[i].page = kmem_to_page(abuf->b_data +
                                                        off_in_block);
                                LASSERT(lnb[i].page);

                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_ZEROCOPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                off_in_block += plen;
                                i++;
                                npages++;
                        }
                } else {
                        if (off_in_block == 0 && len < bs &&
                                        off + len >= obj->oo_attr.la_size)
                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_TAIL_IO, 1);

                        /* can't use zerocopy, allocate temporary buffers */
                        while (sz_in_block > 0) {
                                plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

                                lnb[i].lnb_file_offset = off;
                                lnb[i].offset = 0;
                                lnb[i].len = plen;
                                lnb[i].rc = 0;
                                lnb[i].dentry = NULL;

                                lnb[i].page = alloc_page(OSD_GFP_IO);
                                if (unlikely(lnb[i].page == NULL))
                                        GOTO(out_err, -ENOMEM);

                                LASSERT(lnb[i].page->mapping == NULL);
                                lnb[i].page->mapping = (void *)obj;

                                cfs_atomic_inc(&osd->od_zerocopy_alloc);
                                lprocfs_counter_add(osd->od_stats,
                                                LPROC_OSD_COPY_IO, 1);

                                sz_in_block -= plen;
                                len -= plen;
                                off += plen;
                                i++;
                                npages++;
                        }
                }
        }

        RETURN(npages);

out_err:
        osd_bufs_put(env, &obj->oo_dt, lnb, npages);
        RETURN(-ENOMEM);
}

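/* Entry point for dbo_bufs_get: rw == 0 means read, otherwise write. */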
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, ssize_t len, struct niobuf_local *lnb,
                        int rw, struct lustre_capa *capa)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        int                rc;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        if (rw == 0)
                rc = osd_bufs_get_read(env, obj, offset, len, lnb);
        else
                rc = osd_bufs_get_write(env, obj, offset, len, lnb);

        return rc;
}

static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj = osd_dt_obj(dt);

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        return 0;
}

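/*
 * Declare the credits needed by osd_write_commit(): merge the niobufs into
 * contiguous extents (skipping those already failed with lnb_rc != 0),
 * declare one dmu_tx_hold_write() per extent, plus the SA update for the
 * size attribute.
 */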
static int osd_declare_write_commit(const struct lu_env *env,
                                struct dt_object *dt,
                                struct niobuf_local *lnb, int npages,
                                struct thandle *th)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_thandle *oh;
        uint64_t            offset = 0;
        uint32_t            size = 0;
        int                 i;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(lnb);
        LASSERT(npages > 0);

        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                if (lnb[i].rc)
                        /* ENOSPC, network RPC error, etc.
                         * We don't want to book space for pages which will be
                         * skipped in osd_write_commit().  Hence we skip pages
                         * with lnb_rc != 0 here too */
                        continue;
                if (size == 0) {
                        /* first valid lnb */
                        offset = lnb[i].lnb_file_offset;
                        size = lnb[i].len;
                        continue;
                }
                if (offset + size == lnb[i].lnb_file_offset) {
                        /* this lnb is contiguous to the previous one */
                        size += lnb[i].len;
                        continue;
                }

                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);

                /* start a new extent from the current lnb */
                offset = lnb[i].lnb_file_offset;
                size = lnb[i].len;
        }

        if (size)
                dmu_tx_hold_write(oh->ot_tx, obj->oo_db->db_object,
                                  offset, size);

        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        oh->ot_write_commit = 1; /* used in osd_trans_start() for fail_loc */

        RETURN(0);
}

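/*
 * Commit the prepared buffers: pages allocated for the copy path are written
 * with dmu_write(), loaned ARC buffers are handed to dmu_assign_arcbuf(),
 * and the cached file size is extended and pushed to the SIZE SA if needed.
 */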
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages,
                        struct thandle *th)
{
        struct osd_object  *obj  = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        uint64_t            new_size = 0;
        int                 i, rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        for (i = 0; i < npages; i++) {
                CDEBUG(D_INODE, "write %u bytes at %u\n",
                        (unsigned) lnb[i].len,
                        (unsigned) lnb[i].lnb_file_offset);

                if (lnb[i].rc) {
                        /* ENOSPC, network RPC error, etc.
                         * Unlike ldiskfs, zfs allocates new blocks on rewrite,
                         * so we skip this page if lnb_rc is set to -ENOSPC */
                        CDEBUG(D_INODE, "obj "DFID": skipping lnb[%u]: rc=%d\n",
                                PFID(lu_object_fid(&dt->do_lu)), i,
                                lnb[i].rc);
                        continue;
                }

                if (lnb[i].page->mapping == (void *)obj) {
                        dmu_write(osd->od_objset.os, obj->oo_db->db_object,
                                lnb[i].lnb_file_offset, lnb[i].len,
                                kmap(lnb[i].page), oh->ot_tx);
                        kunmap(lnb[i].page);
                } else if (lnb[i].dentry) {
                        LASSERT(((unsigned long)lnb[i].dentry & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * note that dmu_assign_arcbuf() is smart enough to
                         * recognize a changed blocksize, in which case it
                         * falls back to dmu_write() */
                        dmu_assign_arcbuf(obj->oo_db, lnb[i].lnb_file_offset,
                                        (void *)lnb[i].dentry, oh->ot_tx);
                        /* drop the reference, otherwise osd_bufs_put()
                         * will be releasing it - bad! */
                        lnb[i].dentry = NULL;
                        cfs_atomic_dec(&osd->od_zerocopy_loan);
                }

                if (new_size < lnb[i].lnb_file_offset + lnb[i].len)
                        new_size = lnb[i].lnb_file_offset + lnb[i].len;
        }

        if (unlikely(new_size == 0)) {
                /* no pages to write, no transno is needed */
                th->th_local = 1;
                /* it is important to return 0 even when all lnb_rc == -ENOSPC
                 * since ofd_commitrw_write() retries several times on ENOSPC */
                RETURN(0);
        }

        cfs_write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
                cfs_write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from
                 * oo_attr into dbuf. any update within a single txg will copy
                 * the most recent value */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                        &obj->oo_attr.la_size, 8, oh);
        } else {
                cfs_write_unlock(&obj->oo_attr_lock);
        }

        RETURN(rc);
}

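/*
 * Fill the prepared pages with data for a bulk read by calling osd_read()
 * page by page; once a short read is hit, the remaining pages get rc = 0.
 */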
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_object *obj  = osd_dt_obj(dt);
        struct lu_buf      buf;
        loff_t             offset;
        int                i;

        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);

        for (i = 0; i < npages; i++) {
                buf.lb_buf = kmap(lnb[i].page);
                buf.lb_len = lnb[i].len;
                offset = lnb[i].lnb_file_offset;

                CDEBUG(D_OTHER, "read %u bytes at %u\n",
                        (unsigned) lnb[i].len,
                        (unsigned) lnb[i].lnb_file_offset);
                lnb[i].rc = osd_read(env, dt, &buf, &offset, NULL);
                kunmap(lnb[i].page);

                if (lnb[i].rc < buf.lb_len) {
                        /* all subsequent rc should be 0 */
                        while (++i < npages)
                                lnb[i].rc = 0;
                        break;
                }
        }

        return 0;
}

/*
 * Punch/truncate an object
 *
 *      IN:     db  - dmu_buf of the object to free data in.
 *              off - start of section to free.
 *              len - length of section to free (DMU_OBJECT_END => to EOF).
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_sa() and if off < size, dmu_tx_hold_free()
 * called and then assigned to a transaction group.
 */
static int __osd_object_punch(objset_t *os, dmu_buf_t *db, dmu_tx_t *tx,
                                uint64_t size, uint64_t off, uint64_t len)
{
        int rc = 0;

        /* Assert that the transaction has been assigned to a
           transaction group. */
        LASSERT(tx->tx_txg != 0);
        /*
         * Nothing to do if file already at desired length.
         */
        if (len == DMU_OBJECT_END && size == off)
                return 0;

        if (off < size)
                rc = -dmu_free_range(os, db->db_object, off, len, tx);

        return rc;
}

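/*
 * dbo_punch implementation: free the requested range within the already
 * started transaction.  A punch to OBD_OBJECT_EOF (or past the current size)
 * is treated as a truncate, so la_size is updated and written to the SIZE SA.
 */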
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                        __u64 start, __u64 end, struct thandle *th,
                        struct lustre_capa *capa)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_device  *osd = osd_obj2dev(obj);
        udmu_objset_t      *uos = &osd->od_objset;
        struct osd_thandle *oh;
        __u64               len;
        int                 rc = 0;
        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));

        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);

        cfs_write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
        cfs_write_unlock(&obj->oo_attr_lock);

        rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
                                obj->oo_attr.la_size, start, len);
        /* set new size */
        if (len == DMU_OBJECT_END) {
                cfs_write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
                cfs_write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                        &obj->oo_attr.la_size, 8, oh);
        }
        RETURN(rc);
}

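/*
 * Reserve credits for osd_punch(): the range to be freed (only if the punch
 * actually starts below the current size) and the SA update for the size
 * attribute.
 */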
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                        __u64 start, __u64 end, struct thandle *handle)
{
        struct osd_object  *obj = osd_dt_obj(dt);
        struct osd_thandle *oh;
        __u64               len;
        ENTRY;

        oh = container_of0(handle, struct osd_thandle, ot_super);

        cfs_read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;

        /* declare we'll free some blocks ... */
        if (start < obj->oo_attr.la_size) {
                cfs_read_unlock(&obj->oo_attr_lock);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
        } else {
                cfs_read_unlock(&obj->oo_attr_lock);
        }

        /* ... and we'll modify size attribute */
        dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

        RETURN(0);
}


struct dt_body_operations osd_body_ops = {
        .dbo_read                       = osd_read,
        .dbo_declare_write              = osd_declare_write,
        .dbo_write                      = osd_write,
        .dbo_bufs_get                   = osd_bufs_get,
        .dbo_bufs_put                   = osd_bufs_put,
        .dbo_write_prep                 = osd_write_prep,
        .dbo_declare_write_commit       = osd_declare_write_commit,
        .dbo_write_commit               = osd_write_commit,
        .dbo_read_prep                  = osd_read_prep,
        .do_declare_punch               = osd_declare_punch,
        .do_punch                       = osd_punch,
};