LU-11282 osd-zfs: drop cache immediately 72/33072/7
author     Alex Zhuravlev <bzzz@whamcloud.com>
           Fri, 24 Aug 2018 10:43:43 +0000 (14:43 +0400)
committer  Oleg Drokin <green@whamcloud.com>
           Sat, 1 Sep 2018 03:29:41 +0000 (03:29 +0000)
Drop cached data immediately once a file's size reaches the limit
requested via:
  lctl set_param osd-zfs.*.readcache_max_filesize=<bytes>

Dropping the cache at read is almost free, but it may take a few
extra cycles at write, since we have to find the corresponding dbufs.
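
Example usage sketch (the 1 GiB value is illustrative, not part of this
change; the "1G" suffix assumes lprocfs_str_with_units_to_s64() accepts
unit suffixes, otherwise pass plain bytes):

  # drop cached data for files of 1 GiB or more on all osd-zfs devices
  lctl set_param osd-zfs.*.readcache_max_filesize=1G

  # read the current limit back (reported in bytes)
  lctl get_param osd-zfs.*.readcache_max_filesize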

Change-Id: I107fc1bf5a8d7655da4054048ff07d3dffa9d4d8
Signed-off-by: Alex Zhuravlev <bzzz@whamcloud.com>
Reviewed-on: https://review.whamcloud.com/33072
Tested-by: Jenkins
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/osd-zfs/osd_handler.c
lustre/osd-zfs/osd_internal.h
lustre/osd-zfs/osd_io.c
lustre/osd-zfs/osd_lproc.c

diff --git a/lustre/osd-zfs/osd_handler.c b/lustre/osd-zfs/osd_handler.c
index dfa285c..3a156b6 100644
@@ -1095,6 +1095,7 @@ static int osd_mount(const struct lu_env *env,
 
        o->od_xattr_in_sa = B_TRUE;
        o->od_max_blksz = osd_spa_maxblocksize(o->od_os->os_spa);
+       o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
 
        rc = __osd_obj2dnode(o->od_os, o->od_rootid, &rootdn);
        if (rc)
diff --git a/lustre/osd-zfs/osd_internal.h b/lustre/osd-zfs/osd_internal.h
index edb881c..d0656d3 100644
@@ -97,6 +97,8 @@
 
 #define OSD_GRANT_FOR_LOCAL_OIDS (2ULL << 20) /* 2MB for last_rcvd, ... */
 
+#define OSD_MAX_CACHE_SIZE OBD_OBJECT_EOF
+
 extern struct dt_body_operations osd_body_scrub_ops;
 
 /**
@@ -385,6 +387,7 @@ struct osd_device {
        struct list_head         od_index_backup_list;
        struct list_head         od_index_restore_list;
        spinlock_t               od_lock;
+       unsigned long long       od_readcache_max_filesize;
 };
 
 enum osd_destroy_type {
diff --git a/lustre/osd-zfs/osd_io.c b/lustre/osd-zfs/osd_io.c
index 5715f10..bd26af6 100644
 static char *osd_0copy_tag = "zerocopy";
 
+static void dbuf_set_pending_evict(dmu_buf_t *db)
+{
+       dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
+       dbi->db_pending_evict = TRUE;
+}
+
 static void record_start_io(struct osd_device *osd, int rw, int discont_pages)
 {
        struct obd_histogram *h = osd->od_brw_stats.hist;
@@ -333,7 +339,7 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                             loff_t off, ssize_t len, struct niobuf_local *lnb)
 {
        struct osd_device *osd = osd_obj2dev(obj);
-       int rc, i, numbufs, npages = 0;
+       int rc, i, numbufs, npages = 0, drop_cache = 0;
        ktime_t start = ktime_get();
        dmu_buf_t **dbp;
        s64 delta_ms;
@@ -341,6 +347,9 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
        ENTRY;
        record_start_io(osd, READ, 0);
 
+       if (obj->oo_attr.la_size >= osd->od_readcache_max_filesize)
+               drop_cache = 1;
+
        /* grab buffers for read:
         * OSD API let us to grab buffers first, then initiate IO(s)
         * so that all required IOs will be done in parallel, but at the
@@ -401,6 +410,9 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
                                lnb++;
                        }
 
+                       if (drop_cache)
+                               dbuf_set_pending_evict(dbp[i]);
+
                        /* steal dbuf so dmu_buf_rele_array() can't release
                         * it */
                        dbp[i] = NULL;
@@ -776,6 +788,23 @@ out:
        return rc;
 }
 
+static void osd_evict_dbufs_after_write(struct osd_object *obj,
+                                       loff_t off, ssize_t len)
+{
+       dmu_buf_t **dbp;
+       int i, rc, numbufs;
+
+       rc = -dmu_buf_hold_array_by_bonus(&obj->oo_dn->dn_bonus->db, off, len,
+                                         TRUE, osd_0copy_tag, &numbufs, &dbp);
+       if (unlikely(rc))
+               return;
+
+       for (i = 0; i < numbufs; i++)
+               dbuf_set_pending_evict(dbp[i]);
+
+       dmu_buf_rele_array(dbp, numbufs, osd_0copy_tag);
+}
+
 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages,
                        struct thandle *th)
@@ -784,7 +813,7 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
        struct osd_device  *osd = osd_obj2dev(obj);
        struct osd_thandle *oh;
        uint64_t            new_size = 0;
-       int                 i, rc = 0;
+       int                 i, abufsz, rc = 0, drop_cache = 0;
        unsigned long      iosize = 0;
        ENTRY;
 
@@ -799,6 +828,11 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                                 lnb[npages - 1].lnb_file_offset +
                                 lnb[npages - 1].lnb_len);
 
+       if (obj->oo_attr.la_size >= osd->od_readcache_max_filesize ||
+           lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len >=
+           osd->od_readcache_max_filesize)
+               drop_cache = 1;
+
        /* LU-8791: take oo_guard to avoid the deadlock that changing block
         * size and assigning arcbuf take place at the same time.
         *
@@ -850,8 +884,9 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                                      oh->ot_tx);
                        kunmap(lnb[i].lnb_page);
                        iosize += lnb[i].lnb_len;
+                       abufsz = lnb[i].lnb_len; /* to drop cache below */
                } else if (lnb[i].lnb_data) {
-                       int j, apages, abufsz;
+                       int j, apages;
                        LASSERT(((unsigned long)lnb[i].lnb_data & 1) == 0);
                        /* buffer loaned for zerocopy, try to use it.
                         * notice that dmu_assign_arcbuf() is smart
@@ -873,8 +908,20 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                        lnb[i].lnb_data = NULL;
                        atomic_dec(&osd->od_zerocopy_loan);
                        iosize += abufsz;
+               } else {
+                       /* we don't want to deal with cache if nothing
+                        * has been send to ZFS at this step */
+                       continue;
                }
 
+               if (!drop_cache)
+                       continue;
+
+               /* we have to mark dbufs for eviction here because
+                * dmu_assign_arcbuf() may create a new dbuf for
+                * loaned abuf */
+               osd_evict_dbufs_after_write(obj, lnb[i].lnb_file_offset,
+                                           abufsz);
        }
        up_read(&obj->oo_guard);
 
diff --git a/lustre/osd-zfs/osd_lproc.c b/lustre/osd-zfs/osd_lproc.c
index 0997a4b..7944f18 100644
@@ -330,6 +330,44 @@ static ssize_t zfs_osd_index_backup_seq_write(struct file *file,
 }
 LPROC_SEQ_FOPS(zfs_osd_index_backup);
 
+static int zfs_osd_readcache_seq_show(struct seq_file *m, void *data)
+{
+       struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
+
+       LASSERT(osd != NULL);
+       if (unlikely(osd->od_os == NULL))
+               return -EINPROGRESS;
+
+       seq_printf(m, "%llu\n", osd->od_readcache_max_filesize);
+       return 0;
+}
+
+static ssize_t
+zfs_osd_readcache_seq_write(struct file *file, const char __user *buffer,
+                           size_t count, loff_t *off)
+{
+       struct seq_file *m = file->private_data;
+       struct dt_device *dt = m->private;
+       struct osd_device *osd = osd_dt_dev(dt);
+       s64 val;
+       int rc;
+
+       LASSERT(osd != NULL);
+       if (unlikely(osd->od_os == NULL))
+               return -EINPROGRESS;
+
+       rc = lprocfs_str_with_units_to_s64(buffer, count, &val, '1');
+       if (rc)
+               return rc;
+       if (val < 0)
+               return -ERANGE;
+
+       osd->od_readcache_max_filesize = val > OSD_MAX_CACHE_SIZE ?
+                                        OSD_MAX_CACHE_SIZE : val;
+       return count;
+}
+LPROC_SEQ_FOPS(zfs_osd_readcache);
+
 LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_blksize);
 LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytestotal);
 LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytesfree);
@@ -362,6 +400,8 @@ struct lprocfs_vars lprocfs_osd_obd_vars[] = {
          .fops =       &zfs_osd_force_sync_fops        },
        { .name =       "index_backup",
          .fops =       &zfs_osd_index_backup_fops      },
+       { .name =       "readcache_max_filesize",
+         .fops =       &zfs_osd_readcache_fops },
        { 0 }
 };
 