LU-14627 lnet: Ensure ref taken when queueing for discovery
author Chris Horn <chris.horn@hpe.com>
Thu, 22 Apr 2021 19:51:44 +0000 (14:51 -0500)
committer Oleg Drokin <green@whamcloud.com>
Mon, 14 Jun 2021 16:44:28 +0000 (16:44 +0000)
Call lnet_peer_queue_for_discovery() in
lnet_discovery_event_handler() to ensure that we take a ref on
the peer when forcing it onto the discovery queue. This also ensures
that the peer state has the LNET_PEER_DISCOVERING flag set.

Add a test to sanity-lnet.sh that can trigger the refcount loss bug
in discovery.
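
The fix boils down to the usual take-a-reference-before-queueing rule: a
peer must never sit on the discovery queue without a reference pinning it.
A minimal sketch of that pattern in kernel-style C (the queue, list-field
and refcount helper names are illustrative assumptions based on the commit
text, not the actual LNet source):

    static void peer_queue_for_discovery(struct lnet_peer *lp)
    {
            /* record that discovery is in progress */
            lp->lp_state |= LNET_PEER_DISCOVERING;

            /* take a ref only if the peer is not already queued, so the
             * discovery thread's eventual decref stays balanced and the
             * peer cannot be freed while waiting on the queue */
            if (list_empty(&lp->lp_dc_list)) {
                    lnet_peer_addref_locked(lp);
                    list_add_tail(&lp->lp_dc_list, &discovery_queue);
            }
    }

Calling a helper like this from lnet_discovery_event_handler(), instead of
manipulating the queue directly, is what guarantees both the reference and
the LNET_PEER_DISCOVERING state bit.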

HPE-bug-id: LUS-7651
Test-Parameters: trivial testlist=sanity-lnet
Signed-off-by: Chris Horn <chris.horn@hpe.com>
Change-Id: Ie2908668c4ffde0f993b5b7ea9aa58acd1d6fa9c
Reviewed-on: https://review.whamcloud.com/43418
Reviewed-by: Serguei Smirnov <ssmirnov@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Alexander Boyko <alexander.boyko@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Stephane Thiell <sthiell@stanford.edu>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
191 files changed:
LUSTRE-VERSION-GEN
config/lustre-build-ldiskfs.m4
config/lustre-build-zfs.m4
ldiskfs/Makefile.in
ldiskfs/kernel_patches/patches/linux-5.8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-data-in-dirent.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-export-mb-stream-allocator-variables.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-kill-dx-root.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-max-dir-size.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-extra-checks.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-pa-free-mismatch.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-misc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-no-max-dir-size-limit-for-iam-objects.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-pdirop.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-prealloc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/linux-5.8/ext4-simple-blockalloc.patch [new file with mode: 0644]
ldiskfs/kernel_patches/series/ldiskfs-5.8.0-ml.series [new file with mode: 0644]
libcfs/autoconf/lustre-libcfs.m4
libcfs/include/libcfs/linux/linux-net.h
libcfs/libcfs/libcfs_string.c
libcfs/libcfs/linux-crypto-adler.c
lnet/include/lnet/lib-types.h
lnet/include/uapi/linux/lnet/lnet-dlc.h
lnet/klnds/gnilnd/gnilnd.c
lnet/klnds/gnilnd/gnilnd_proc.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_proto.c
lnet/lnet/acceptor.c
lnet/lnet/api-ni.c
lnet/lnet/config.c
lnet/lnet/lib-socket.c
lnet/lnet/peer.c
lnet/utils/lnetconfig/liblnetconfig.c
lustre-iokit/stats-collect/iokit-gather-stats
lustre/ChangeLog
lustre/autoconf/lustre-core.m4
lustre/doc/lfs-find.1
lustre/include/cl_object.h
lustre/include/llog_swab.h
lustre/include/lprocfs_status.h
lustre/include/lu_object.h
lustre/include/lustre/lustreapi.h
lustre/include/lustre_compat.h
lustre/include/lustre_disk.h
lustre/include/lustre_fid.h
lustre/include/lustre_linkea.h
lustre/include/lustre_lmv.h
lustre/include/lustre_net.h
lustre/include/lustre_osc.h
lustre/include/lustre_req_layout.h
lustre/include/lustre_swab.h
lustre/include/md_object.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/include/obd_support.h
lustre/include/uapi/linux/lustre/lustre_idl.h
lustre/include/uapi/linux/lustre/lustre_user.h
lustre/kernel_patches/targets/3.10-rhel7.9.target.in
lustre/kernel_patches/targets/4.12-sles12sp5.target.in
lustre/kernel_patches/targets/4.12-sles15sp2.target.in
lustre/kernel_patches/targets/4.18-rhel8.4.target.in [new file with mode: 0644]
lustre/kernel_patches/which_patch
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lockd.c
lustre/lfsck/lfsck_namespace.c
lustre/llite/dcache.c
lustre/llite/dir.c
lustre/llite/file.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/llite_nfs.c
lustre/llite/lproc_llite.c
lustre/llite/namei.c
lustre/llite/rw26.c
lustre/llite/statahead.c
lustre/llite/vvp_io.c
lustre/llite/vvp_object.c
lustre/lmv/lmv_intent.c
lustre/lmv/lmv_obd.c
lustre/lod/lod_lov.c
lustre/lod/lod_object.c
lustre/lod/lod_pool.c
lustre/lod/lod_qos.c
lustre/lov/lov_io.c
lustre/lov/lov_obd.c
lustre/lov/lov_pool.c
lustre/mdc/mdc_acl.c
lustre/mdc/mdc_changelog.c
lustre/mdc/mdc_dev.c
lustre/mdc/mdc_internal.h
lustre/mdc/mdc_lib.c
lustre/mdc/mdc_locks.c
lustre/mdc/mdc_reint.c
lustre/mdc/mdc_request.c
lustre/mdd/mdd_device.c
lustre/mdd/mdd_dir.c
lustre/mdd/mdd_internal.h
lustre/mdd/mdd_object.c
lustre/mdt/mdt_coordinator.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_lib.c
lustre/mdt/mdt_open.c
lustre/mdt/mdt_reint.c
lustre/mgc/mgc_internal.h
lustre/mgc/mgc_request.c
lustre/mgs/mgs_barrier.c
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_llog.c
lustre/mgs/mgs_nids.c
lustre/obdclass/cl_page.c
lustre/obdclass/jobid.c
lustre/obdclass/linkea.c
lustre/obdclass/llog.c
lustre/obdclass/llog_osd.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_tgt_pool.c
lustre/obdclass/obd_mount.c
lustre/obdecho/echo_client.c
lustre/ofd/ofd_obd.c
lustre/osc/osc_cache.c
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_io.c
lustre/osp/osp_internal.h
lustre/osp/osp_md_object.c
lustre/osp/osp_object.c
lustre/osp/osp_sync.c
lustre/ptlrpc/client.c
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/sec_gss.c
lustre/ptlrpc/import.c
lustre/ptlrpc/layout.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/nodemap_lproc.c
lustre/ptlrpc/nodemap_storage.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/ptlrpc_internal.h
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_plain.c
lustre/ptlrpc/service.c
lustre/ptlrpc/wiretest.c
lustre/quota/qmt_handler.c
lustre/quota/qmt_pool.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_request.c
lustre/scripts/Makefile.am
lustre/scripts/lustre_rmmod
lustre/scripts/statechange-lustre.sh
lustre/scripts/vdev_attach-lustre.sh [new symlink]
lustre/scripts/vdev_clear-lustre.sh [new symlink]
lustre/scripts/vdev_remove-lustre.sh [new symlink]
lustre/target/out_handler.c
lustre/target/tgt_handler.c
lustre/tests/ha.sh
lustre/tests/llapi_layout_test.c
lustre/tests/ost-pools.sh
lustre/tests/racer.sh
lustre/tests/racer/file_create.sh
lustre/tests/racer/racer.sh
lustre/tests/recovery-small.sh
lustre/tests/replay-single.sh
lustre/tests/sanity-flr.sh
lustre/tests/sanity-hsm.sh
lustre/tests/sanity-lfsck.sh
lustre/tests/sanity-lnet.sh
lustre/tests/sanity-pcc.sh
lustre/tests/sanity-quota.sh
lustre/tests/sanity-scrub.sh
lustre/tests/sanity-sec.sh
lustre/tests/sanity-selinux.sh
lustre/tests/sanity.sh
lustre/tests/sanityn.sh
lustre/tests/setup-nfs.sh
lustre/tests/test-framework.sh
lustre/utils/Makefile.am
lustre/utils/lfs.c
lustre/utils/liblustreapi.c
lustre/utils/liblustreapi_hsm.c
lustre/utils/liblustreapi_layout.c
lustre/utils/lustreapi_internal.h
lustre/utils/wirecheck.c
lustre/utils/wiretest.c

index db06e1c..2cd438e 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-DEFAULT_VERSION=2.14.51
+DEFAULT_VERSION=2.14.52
 LVF=LUSTRE-VERSION-FILE
 
 LF='
index 67e3b41..5695078 100644 (file)
@@ -103,9 +103,13 @@ AS_IF([test -z "$LDISKFS_SERIES"],
        AS_VERSION_COMPARE([$LINUXRELEASE],[5.4.0],[],
        [LDISKFS_SERIES="5.4.0-ml.series"],[
        AS_VERSION_COMPARE([$LINUXRELEASE],[5.4.21],
-               [LDISKFS_SERIES="5.4.0-ml.series"],  # lt
-               [LDISKFS_SERIES="5.4.21-ml.series"], # eq
-               [LDISKFS_SERIES="5.4.21-ml.series"]  # gt
+         [LDISKFS_SERIES="5.4.0-ml.series"],  # lt
+         [LDISKFS_SERIES="5.4.21-ml.series"], # eq
+         [AS_VERSION_COMPARE([$LINUXRELEASE],[5.8.0],
+           [LDISKFS_SERIES="5.4.21-ml.series"], # lt
+           [LDISKFS_SERIES="5.8.0-ml.series"],  # eq
+           [LDISKFS_SERIES="5.8.0-ml.series"],  # gt
+         )]
                )])
        ],
 [])
index acacf34..5a83455 100644 (file)
@@ -343,7 +343,7 @@ AC_DEFUN([LB_ZFS_USER], [
 
        ZFS_LIBZFS_INCLUDE=${zfsinc}
        ZFS_LIBZFS_LDFLAGS=${zfslib}
-       ZFS_LIBZFS_LIBS="-lzfs -lnvpair"
+       ZFS_LIBZFS_LIBS="-lzfs -lnvpair -lzpool"
        AC_SUBST(ZFS_LIBZFS_INCLUDE)
        AC_SUBST(ZFS_LIBZFS_LDFLAGS)
        AC_SUBST(ZFS_LIBZFS_LIBS)
index a061c5a..a76432b 100644 (file)
@@ -10,7 +10,7 @@ linux_headers := $(wildcard @LINUX@/include/linux/ext4*.h)
 linux_new_headers := htree_lock.h
 trace_headers := $(wildcard @LINUX@/include/trace/events/ext4*.h)
 
-backfs_sources := $(filter-out %.mod.c,$(wildcard @EXT4_SRC_DIR@/*.c))
+backfs_sources := $(filter-out %.mod.c %/inode-test.c,$(wildcard @EXT4_SRC_DIR@/*.c))
 
 new_sources := mmp.c htree_lock.c
 new_headers :=
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
new file mode 100644 (file)
index 0000000..db280da
--- /dev/null
@@ -0,0 +1,262 @@
+Since we can skip corrupt block groups, this patch
+uses ext4_warning() instead of ext4_error() so the filesystem is
+not remounted read-only by default
+
+---
+ fs/ext4/balloc.c  |   10 ++++----
+ fs/ext4/ialloc.c  |    6 ++--
+ fs/ext4/mballoc.c |   66 +++++++++++++++++++++---------------------------------
+ 3 files changed, 34 insertions(+), 48 deletions(-)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -382,7 +382,7 @@ static int ext4_validate_block_bitmap(st
+                                                   desc, bh) ||
+                    ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
++              ext4_warning(sb, "bg %u: bad block bitmap checksum", block_group);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EFSBADCRC;
+@@ -390,8 +390,8 @@ static int ext4_validate_block_bitmap(st
+       blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+       if (unlikely(blk != 0)) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+-                         block_group, blk);
++              ext4_warning(sb, "bg %u: block %llu: invalid block bitmap",
++                           block_group, blk);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EFSCORRUPTED;
+@@ -467,8 +467,8 @@ ext4_read_block_bitmap_nowait(struct sup
+               ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+               if (err) {
+-                      ext4_error(sb, "Failed to init block bitmap for group "
+-                                 "%u: %d", block_group, err);
++                      ext4_warning(sb, "Failed to init block bitmap for group "
++                                   "%u: %d", block_group, err);
+                       goto out;
+               }
+               goto verify;
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -97,8 +97,8 @@ static int ext4_validate_inode_bitmap(st
+                                          EXT4_INODES_PER_GROUP(sb) / 8) ||
+           ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
+               ext4_unlock_group(sb, block_group);
+-              ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
+-                         "inode_bitmap = %llu", block_group, blk);
++              ext4_warning(sb, "Corrupt inode bitmap - block_group = %u, "
++                           "inode_bitmap = %llu", block_group, blk);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_IBITMAP_CORRUPT);
+               return -EFSBADCRC;
+@@ -345,7 +345,7 @@ out:
+               if (!fatal)
+                       fatal = err;
+       } else {
+-              ext4_error(sb, "bit already cleared for inode %lu", ino);
++              ext4_warning(sb, "bit already cleared for inode %lu", ino);
+               ext4_mark_group_bitmap_corrupted(sb, block_group,
+                                       EXT4_GROUP_INFO_IBITMAP_CORRUPT);
+       }
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -802,10 +802,14 @@ int ext4_mb_generate_buddy(struct super_
+       grp->bb_fragments = fragments;
+       if (free != grp->bb_free) {
+-              ext4_grp_locked_error(sb, group, 0, 0,
+-                                    "block bitmap and bg descriptor "
+-                                    "inconsistent: %u vs %u free clusters",
+-                                    free, grp->bb_free);
++              struct ext4_group_desc *gdp;
++              gdp = ext4_get_group_desc(sb, group, NULL);
++              ext4_warning(sb, "group %lu: block bitmap and bg descriptor "
++                           "inconsistent: %u vs %u free clusters "
++                           "%u in gd, %lu pa's",
++                           (long unsigned int)group, free, grp->bb_free,
++                           ext4_free_group_clusters(sb, gdp),
++                           grp->bb_prealloc_nr);
+               /*
+                * If we intend to continue, we consider group descriptor
+                * corrupt and update bb_free using bitmap value
+@@ -1168,7 +1172,7 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+       int block;
+       int pnum;
+       int poff;
+-      struct page *page;
++      struct page *page = NULL;
+       int ret;
+       struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -1194,7 +1198,7 @@ ext4_mb_load_buddy_gfp(struct super_bloc
+                */
+               ret = ext4_mb_init_group(sb, group, gfp);
+               if (ret)
+-                      return ret;
++                      goto err;
+       }
+       /*
+@@ -1297,6 +1301,7 @@ err:
+               put_page(e4b->bd_buddy_page);
+       e4b->bd_buddy = NULL;
+       e4b->bd_bitmap = NULL;
++      ext4_warning(sb, "Error loading buddy information for %u", group);
+       return ret;
+ }
+@@ -3782,9 +3787,11 @@ int ext4_mb_check_ondisk_bitmap(struct s
+       }
+       if (free != free_in_gdp) {
+-              ext4_error(sb, "on-disk bitmap for group %d"
++              ext4_warning(sb, "on-disk bitmap for group %d"
+                       "corrupted: %u blocks free in bitmap, %u - in gd\n",
+                       group, free, free_in_gdp);
++              ext4_mark_group_bitmap_corrupted(sb, group,
++                                      EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+               return -EIO;
+       }
+       return 0;
+@@ -4129,16 +4136,8 @@ ext4_mb_release_inode_pa(struct ext4_bud
+       /* "free < pa->pa_free" means we maybe double alloc the same blocks,
+        * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
+       if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
+-              ext4_error(sb, "pa free mismatch: [pa %p] "
+-                              "[phy %lu] [logic %lu] [len %u] [free %u] "
+-                              "[error %u] [inode %d] [freed %u]", pa,
+-                              (unsigned long)pa->pa_pstart,
+-                              (unsigned long)pa->pa_lstart,
+-                              pa->pa_len, (unsigned)pa->pa_free,
+-                              (unsigned)pa->pa_error, pa->pa_inode->i_ino,
+-                              free);
+               ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
+-                                      free, pa->pa_free);
++                                    free, pa->pa_free);
+               /*
+                * pa is already deleted so we use the value obtained
+                * from the bitmap and continue.
+@@ -4199,16 +4198,11 @@ ext4_mb_discard_group_preallocations(str
+       bitmap_bh = ext4_read_block_bitmap(sb, group);
+       if (IS_ERR(bitmap_bh)) {
+               err = PTR_ERR(bitmap_bh);
+-              ext4_error_err(sb, -err,
+-                             "Error %d reading block bitmap for %u",
+-                             err, group);
+               goto out_dbg;
+       }
+       err = ext4_mb_load_buddy(sb, group, &e4b);
+       if (err) {
+-              ext4_warning(sb, "Error %d loading buddy information for %u",
+-                           err, group);
+               put_bh(bitmap_bh);
+               goto out_dbg;
+       }
+@@ -4376,17 +4370,12 @@ repeat:
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
+-              if (err) {
+-                      ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+-                                     err, group);
++              if (err)
+                       return;
+-              }
+               bitmap_bh = ext4_read_block_bitmap(sb, group);
+               if (IS_ERR(bitmap_bh)) {
+                       err = PTR_ERR(bitmap_bh);
+-                      ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
+-                                     err, group);
+                       ext4_mb_unload_buddy(&e4b);
+                       continue;
+               }
+@@ -4681,11 +4670,8 @@ ext4_mb_discard_lg_preallocations(struct
+               group = ext4_get_group_number(sb, pa->pa_pstart);
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
+-              if (err) {
+-                      ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+-                                     err, group);
++              if (err)
+                       continue;
+-              }
+               ext4_lock_group(sb, group);
+               list_del(&pa->pa_group_list);
+               ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+@@ -4974,7 +4960,7 @@ errout:
+                        * been updated or not when fail case. So can
+                        * not revert pa_free back, just mark pa_error*/
+                       pa->pa_error++;
+-                      ext4_error(sb,
++                      ext4_warning(sb,
+                               "Updating bitmap error: [err %d] "
+                               "[pa %p] [phy %lu] [logic %lu] "
+                               "[len %u] [free %u] [error %u] "
+@@ -4985,6 +4971,7 @@ errout:
+                               (unsigned)pa->pa_free,
+                               (unsigned)pa->pa_error,
+                               pa->pa_inode ? pa->pa_inode->i_ino : 0);
++                      ext4_mark_group_bitmap_corrupted(sb, 0, 0);
+               }
+       }
+       ext4_mb_release_context(ac);
+@@ -5271,7 +5258,7 @@ do_more:
+       err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
+                                    GFP_NOFS|__GFP_NOFAIL);
+       if (err)
+-              goto error_return;
++              goto error_brelse;
+       /*
+        * We need to make sure we don't reuse the freed block until after the
+@@ -5362,8 +5349,9 @@ do_more:
+               goto do_more;
+       }
+ error_return:
+-      brelse(bitmap_bh);
+       ext4_std_error(sb, err);
++error_brelse:
++      brelse(bitmap_bh);
+       return;
+ }
+@@ -5463,7 +5451,7 @@ int ext4_group_add_blocks(handle_t *hand
+       err = ext4_mb_load_buddy(sb, block_group, &e4b);
+       if (err)
+-              goto error_return;
++              goto error_brelse;
+       /*
+        * need to update group_info->bb_free and bitmap
+@@ -5502,8 +5490,9 @@ int ext4_group_add_blocks(handle_t *hand
+               err = ret;
+ error_return:
+-      brelse(bitmap_bh);
+       ext4_std_error(sb, err);
++error_brelse:
++      brelse(bitmap_bh);
+       return err;
+ }
+@@ -5578,11 +5567,8 @@ ext4_trim_all_free(struct super_block *s
+       trace_ext4_trim_all_free(sb, group, start, max);
+       ret = ext4_mb_load_buddy(sb, group, &e4b);
+-      if (ret) {
+-              ext4_warning(sb, "Error %d loading buddy information for %u",
+-                           ret, group);
++      if (ret)
+               return ret;
+-      }
+       bitmap = e4b.bd_bitmap;
+       ext4_lock_group(sb, group);
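
Taken together, the hunks above implement one consistent error path:
downgrade ext4_error() to ext4_warning(), flag the group with
ext4_mark_group_bitmap_corrupted() so the allocator skips it from then on,
and return an error code without triggering errors=remount-ro handling. A
condensed sketch of the resulting shape (paraphrased from the hunks above,
not a literal excerpt):

    if (ext4_valid_block_bitmap(sb, desc, block_group, bh) != 0) {
            ext4_unlock_group(sb, block_group);
            /* warn instead of ext4_error(): no remount-ro escalation */
            ext4_warning(sb, "bg %u: invalid block bitmap", block_group);
            /* mark the group corrupt so later allocations skip it */
            ext4_mark_group_bitmap_corrupted(sb, block_group,
                            EXT4_GROUP_INFO_BBITMAP_CORRUPT);
            return -EFSCORRUPTED;
    }
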
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-data-in-dirent.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-data-in-dirent.patch
new file mode 100644 (file)
index 0000000..014fedc
--- /dev/null
@@ -0,0 +1,721 @@
+This patch implements a feature which allows ext4 filesystem users
+(e.g. Lustre) to store data in an ext4 dirent.
+Data is stored in the ext4 dirent after the file name; this space is
+accounted for in de->rec_len. The flag EXT4_DIRENT_LUFID is added to
+d_type if extra data is present.
+
+dentry->d_fsdata is used to pass the FID to ext4, so no changes to
+the ext4_add_entry() interface are required.
+
+---
+ fs/ext4/dir.c    |   13 ++-
+ fs/ext4/ext4.h   |   97 +++++++++++++++++++++++++-
+ fs/ext4/inline.c |    8 +-
+ fs/ext4/namei.c  |  201 +++++++++++++++++++++++++++++++++++++++++++------------
+ fs/ext4/super.c  |    4 -
+ 5 files changed, 267 insertions(+), 56 deletions(-)
+
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -78,7 +78,7 @@ int __ext4_check_dir_entry(const char *f
+               error_msg = "rec_len is smaller than minimal";
+       else if (unlikely(rlen % 4 != 0))
+               error_msg = "rec_len % 4 != 0";
+-      else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
++      else if (unlikely(rlen < EXT4_DIR_ENTRY_LEN(de)))
+               error_msg = "rec_len is too small for name_len";
+       else if (unlikely(next_offset > size))
+               error_msg = "directory entry overrun";
+@@ -226,7 +226,7 @@ static int ext4_readdir(struct file *fil
+                                * failure will be detected in the
+                                * dirent test below. */
+                               if (ext4_rec_len_from_disk(de->rec_len,
+-                                      sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
++                                  sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
+                                       break;
+                               i += ext4_rec_len_from_disk(de->rec_len,
+                                                           sb->s_blocksize);
+@@ -449,12 +449,17 @@ int ext4_htree_store_dirent(struct file
+       struct fname *fname, *new_fn;
+       struct dir_private_info *info;
+       int len;
++      int extra_data = 0;
+       info = dir_file->private_data;
+       p = &info->root.rb_node;
+       /* Create and allocate the fname structure */
+-      len = sizeof(struct fname) + ent_name->len + 1;
++      if (dirent->file_type & EXT4_DIRENT_LUFID)
++              extra_data = ext4_get_dirent_data_len(dirent);
++
++      len = sizeof(struct fname) + ent_name->len + extra_data + 1;
++
+       new_fn = kzalloc(len, GFP_KERNEL);
+       if (!new_fn)
+               return -ENOMEM;
+@@ -463,7 +468,7 @@ int ext4_htree_store_dirent(struct file
+       new_fn->inode = le32_to_cpu(dirent->inode);
+       new_fn->name_len = ent_name->len;
+       new_fn->file_type = dirent->file_type;
+-      memcpy(new_fn->name, ent_name->name, ent_name->len);
++      memcpy(new_fn->name, ent_name->name, ent_name->len + extra_data);
+       while (*p) {
+               parent = *p;
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1118,6 +1118,7 @@ struct ext4_inode_info {
+       __u32 i_csum_seed;
+       kprojid_t i_projid;
++      void *i_dirdata;
+ };
+ /*
+@@ -1161,6 +1162,7 @@ struct ext4_inode_info {
+ #define EXT4_MOUNT_POSIX_ACL          0x08000 /* POSIX Access Control Lists */
+ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC   0x10000 /* No auto delalloc mapping */
+ #define EXT4_MOUNT_BARRIER            0x20000 /* Use block barriers */
++#define EXT4_MOUNT_DIRDATA            0x60000 /* Data in directory entries*/
+ #define EXT4_MOUNT_QUOTA              0x40000 /* Some quota option set */
+ #define EXT4_MOUNT_USRQUOTA           0x80000 /* "old" user quota,
+                                                * enable enforcement for hidden
+@@ -1955,6 +1957,7 @@ EXT4_FEATURE_INCOMPAT_FUNCS(casefold,            C
+                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+                                        EXT4_FEATURE_INCOMPAT_EA_INODE| \
+                                        EXT4_FEATURE_INCOMPAT_MMP | \
++                                       EXT4_FEATURE_INCOMPAT_DIRDATA| \
+                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
+                                        EXT4_FEATURE_INCOMPAT_ENCRYPT | \
+                                        EXT4_FEATURE_INCOMPAT_CASEFOLD | \
+@@ -2133,6 +2136,43 @@ struct ext4_dir_entry_tail {
+ #define EXT4_FT_SYMLINK               7
+ #define EXT4_FT_MAX           8
++#define EXT4_FT_MASK          0xf
++
++#if EXT4_FT_MAX > EXT4_FT_MASK
++#error "conflicting EXT4_FT_MAX and EXT4_FT_MASK"
++#endif
++
++/*
++ * d_type has 4 unused bits, so it can hold four types of data. These
++ * different types of data (e.g. Lustre data, the high 32 bits of a
++ * 64-bit inode number) can be stored, in flag order, after the file
++ * name in the ext4 dirent.
++ */
++/*
++ * This flag is added to d_type if the ext4 dirent has extra data after
++ * the filename. The data length is variable and is stored in the first
++ * byte of the data. The data starts after the filename NUL byte.
++ * This is used by the Lustre filesystem.
++ */
++#define EXT4_DIRENT_LUFID             0x10
++
++#define EXT4_LUFID_MAGIC    0xAD200907UL
++struct ext4_dentry_param {
++      __u32  edp_magic;       /* EXT4_LUFID_MAGIC */
++      char   edp_len;         /* size of edp_data in bytes */
++      char   edp_data[0];     /* packed array of data */
++} __packed;
++
++static inline unsigned char *ext4_dentry_get_data(struct super_block *sb,
++                                                struct ext4_dentry_param *p)
++
++{
++      if (!ext4_has_feature_dirdata(sb))
++              return NULL;
++      if (p && p->edp_magic == EXT4_LUFID_MAGIC)
++              return &p->edp_len;
++      else
++              return NULL;
++}
+ #define EXT4_FT_DIR_CSUM      0xDE
+@@ -2143,8 +2183,16 @@ struct ext4_dir_entry_tail {
+  */
+ #define EXT4_DIR_PAD                  4
+ #define EXT4_DIR_ROUND                        (EXT4_DIR_PAD - 1)
+-#define EXT4_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXT4_DIR_ROUND) & \
++#define EXT4_DIR_REC_LEN_(name_len)   (((name_len) + 8 + EXT4_DIR_ROUND) & \
+                                        ~EXT4_DIR_ROUND)
++#define EXT4_DIR_ENTRY_LEN_(de)               (EXT4_DIR_REC_LEN_((de)->name_len +\
++                                      ext4_get_dirent_data_len(de)))
++/* ldiskfs */
++#define EXT4_DIR_REC_LEN(name_len)    EXT4_DIR_REC_LEN_((name_len))
++#define EXT4_DIR_ENTRY_LEN(de)                EXT4_DIR_ENTRY_LEN_((de))
++/* lustre osd_handler compat */
++#define __EXT4_DIR_REC_LEN(name_len)  EXT4_DIR_REC_LEN_((name_len))
++
+ #define EXT4_MAX_REC_LEN              ((1<<16)-1)
+ /*
+@@ -2604,11 +2652,11 @@ extern int ext4_find_dest_de(struct inod
+                            struct buffer_head *bh,
+                            void *buf, int buf_size,
+                            struct ext4_filename *fname,
+-                           struct ext4_dir_entry_2 **dest_de);
++                           struct ext4_dir_entry_2 **dest_de, int *dlen);
+ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+-                      struct ext4_filename *fname);
++                      struct ext4_filename *fname, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
+       if (!ext4_has_feature_dir_index(inode->i_sb)) {
+@@ -2623,10 +2671,17 @@ static const unsigned char ext4_filetype
+ static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+-      if (!ext4_has_feature_filetype(sb) || filetype >= EXT4_FT_MAX)
++      int fl_index = filetype & EXT4_FT_MASK;
++
++      if (!ext4_has_feature_filetype(sb) || fl_index >= EXT4_FT_MAX)
+               return DT_UNKNOWN;
+-      return ext4_filetype_table[filetype];
++      if (!test_opt(sb, DIRDATA))
++              return ext4_filetype_table[fl_index];
++
++      return (ext4_filetype_table[fl_index]) |
++              (filetype & EXT4_DIRENT_LUFID);
++
+ }
+ extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+                            void *buf, int buf_size);
+@@ -2785,6 +2840,8 @@ extern struct inode *ext4_create_inode(h
+ extern int ext4_delete_entry(handle_t *handle, struct inode * dir,
+                            struct ext4_dir_entry_2 *de_del,
+                            struct buffer_head *bh);
++extern int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++                             struct inode *inode, const void *, const void *);
+ extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+                               __u32 start_minor_hash, __u32 *next_hash);
+ extern int ext4_search_dir(struct buffer_head *bh,
+@@ -3559,6 +3616,36 @@ static inline int ext4_buffer_uptodate(s
+       return buffer_uptodate(bh);
+ }
++/*
++ * Compute the total directory entry data length.
++ * This includes the filename and an implicit NUL terminator (always present),
++ * and optional extensions.  Each extension has a bit set in the high 4 bits of
++ * de->file_type, and the extension length is the first byte in each entry.
++ */
++static inline int ext4_get_dirent_data_len(struct ext4_dir_entry_2 *de)
++{
++      char *len = de->name + de->name_len + 1 /* NUL terminator */;
++      int dlen = 0;
++      __u8 extra_data_flags = (de->file_type & ~EXT4_FT_MASK) >> 4;
++      struct ext4_dir_entry_tail *t = (struct ext4_dir_entry_tail *)de;
++
++      if (!t->det_reserved_zero1 &&
++          le16_to_cpu(t->det_rec_len) ==
++              sizeof(struct ext4_dir_entry_tail) &&
++          !t->det_reserved_zero2 &&
++          t->det_reserved_ft == EXT4_FT_DIR_CSUM)
++              return 0;
++
++      while (extra_data_flags) {
++              if (extra_data_flags & 1) {
++                      dlen += *len + (dlen == 0);
++                      len += *len;
++              }
++              extra_data_flags >>= 1;
++      }
++      return dlen;
++}
++
+ #endif        /* __KERNEL__ */
+ #define EFSBADCRC     EBADMSG         /* Bad CRC detected */
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1023,7 +1023,7 @@ static int ext4_add_dirent_to_inline(han
+       struct ext4_dir_entry_2 *de;
+       err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+-                              inline_size, fname, &de);
++                              inline_size, fname, &de, NULL);
+       if (err)
+               return err;
+@@ -1031,7 +1031,7 @@ static int ext4_add_dirent_to_inline(han
+       err = ext4_journal_get_write_access(handle, iloc->bh);
+       if (err)
+               return err;
+-      ext4_insert_dentry(inode, de, inline_size, fname);
++      ext4_insert_dentry(inode, de, inline_size, fname, NULL);
+       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
+@@ -1380,7 +1380,7 @@ int ext4_inlinedir_to_tree(struct file *
+                       fake.name_len = 1;
+                       strcpy(fake.name, ".");
+                       fake.rec_len = ext4_rec_len_to_disk(
+-                                              EXT4_DIR_REC_LEN(fake.name_len),
++                                              EXT4_DIR_ENTRY_LEN(&fake),
+                                               inline_size);
+                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+                       de = &fake;
+@@ -1390,7 +1390,7 @@ int ext4_inlinedir_to_tree(struct file *
+                       fake.name_len = 2;
+                       strcpy(fake.name, "..");
+                       fake.rec_len = ext4_rec_len_to_disk(
+-                                              EXT4_DIR_REC_LEN(fake.name_len),
++                                              EXT4_DIR_ENTRY_LEN(&fake),
+                                               inline_size);
+                       ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
+                       de = &fake;
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -265,7 +265,8 @@ static unsigned dx_get_count(struct dx_e
+ static unsigned dx_get_limit(struct dx_entry *entries);
+ static void dx_set_count(struct dx_entry *entries, unsigned value);
+ static void dx_set_limit(struct dx_entry *entries, unsigned value);
+-static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
++static inline unsigned dx_root_limit(struct inode *dir,
++              struct ext4_dir_entry_2 *dot_de, unsigned infosize);
+ static unsigned dx_node_limit(struct inode *dir);
+ static struct dx_frame *dx_probe(struct ext4_filename *fname,
+                                struct inode *dir,
+@@ -409,22 +410,23 @@ static struct dx_countlimit *get_dx_coun
+ {
+       struct ext4_dir_entry *dp;
+       struct dx_root_info *root;
+-      int count_offset;
++      int count_offset, dot_rec_len, dotdot_rec_len;
+       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+               count_offset = 8;
+-      else if (le16_to_cpu(dirent->rec_len) == 12) {
+-              dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
++      else {
++              dot_rec_len = le16_to_cpu(dirent->rec_len);
++              dp = (struct ext4_dir_entry *)(((void *)dirent) + dot_rec_len);
+               if (le16_to_cpu(dp->rec_len) !=
+-                  EXT4_BLOCK_SIZE(inode->i_sb) - 12)
++                  EXT4_BLOCK_SIZE(inode->i_sb) - dot_rec_len)
+                       return NULL;
+-              root = (struct dx_root_info *)(((void *)dp + 12));
++              dotdot_rec_len = EXT4_DIR_ENTRY_LEN((struct ext4_dir_entry_2 *)dp);
++              root = (struct dx_root_info *)(((void *)dp + dotdot_rec_len));
+               if (root->reserved_zero ||
+                   root->info_length != sizeof(struct dx_root_info))
+                       return NULL;
+-              count_offset = 32;
+-      } else
+-              return NULL;
++              count_offset = 8 + dot_rec_len + dotdot_rec_len;
++      }
+       if (offset)
+               *offset = count_offset;
+@@ -529,11 +531,12 @@ ext4_next_entry(struct ext4_dir_entry_2
+  */
+ struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
+ {
++      BUG_ON(de->name_len != 1);
+       /* get dotdot first */
+-      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_ENTRY_LEN(de));
+       /* dx root info is after dotdot entry */
+-      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_ENTRY_LEN(de));
+       return (struct dx_root_info *)de;
+ }
+@@ -578,10 +581,16 @@ static inline void dx_set_limit(struct d
+       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+ }
+-static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
++static inline unsigned dx_root_limit(struct inode *dir,
++              struct ext4_dir_entry_2 *dot_de, unsigned infosize)
+ {
+-      unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+-              EXT4_DIR_REC_LEN(2) - infosize;
++      struct ext4_dir_entry_2 *dotdot_de;
++      unsigned entry_space;
++
++      BUG_ON(dot_de->name_len != 1);
++      dotdot_de = ext4_next_entry(dot_de, dir->i_sb->s_blocksize);
++      entry_space = dir->i_sb->s_blocksize - EXT4_DIR_ENTRY_LEN(dot_de) -
++                       EXT4_DIR_ENTRY_LEN(dotdot_de) - infosize;
+       if (ext4_has_metadata_csum(dir->i_sb))
+               entry_space -= sizeof(struct dx_tail);
+@@ -702,7 +711,7 @@ static struct stats dx_show_leaf(struct
+                                      (unsigned) ((char *) de - base));
+ #endif
+                       }
+-                      space += EXT4_DIR_REC_LEN(de->name_len);
++                      space += EXT4_DIR_ENTRY_LEN(de);
+                       names++;
+               }
+               de = ext4_next_entry(de, size);
+@@ -809,11 +818,14 @@ dx_probe(struct ext4_filename *fname, st
+       entries = (struct dx_entry *)(((char *)info) + info->info_length);
+-      if (dx_get_limit(entries) != dx_root_limit(dir,
+-                                                 info->info_length)) {
++      if (dx_get_limit(entries) !=
++          dx_root_limit(dir, (struct ext4_dir_entry_2 *)frame->bh->b_data,
++                        info->info_length)) {
+               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
+                                  dx_get_limit(entries),
+-                                 dx_root_limit(dir, info->info_length));
++                                 dx_root_limit(dir,
++                                        (struct ext4_dir_entry_2 *)frame->bh->b_data,
++                                        info->info_length));
+               goto fail;
+       }
+@@ -1799,7 +1811,7 @@ dx_move_dirents(char *from, char *to, st
+       while (count--) {
+               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
+                                               (from + (map->offs<<2));
+-              rec_len = EXT4_DIR_REC_LEN(de->name_len);
++              rec_len = EXT4_DIR_ENTRY_LEN(de);
+               memcpy (to, de, rec_len);
+               ((struct ext4_dir_entry_2 *) to)->rec_len =
+                               ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1823,7 +1835,7 @@ static struct ext4_dir_entry_2* dx_pack_
+       while ((char*)de < base + blocksize) {
+               next = ext4_next_entry(de, blocksize);
+               if (de->inode && de->name_len) {
+-                      rec_len = EXT4_DIR_REC_LEN(de->name_len);
++                      rec_len = EXT4_DIR_ENTRY_LEN(de);
+                       if (de > to)
+                               memmove(to, de, rec_len);
+                       to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
+@@ -1950,14 +1962,16 @@ int ext4_find_dest_de(struct inode *dir,
+                     struct buffer_head *bh,
+                     void *buf, int buf_size,
+                     struct ext4_filename *fname,
+-                    struct ext4_dir_entry_2 **dest_de)
++                    struct ext4_dir_entry_2 **dest_de, int *dlen)
+ {
+       struct ext4_dir_entry_2 *de;
+-      unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
++      unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname)) +
++                                                (dlen ? *dlen : 0);
+       int nlen, rlen;
+       unsigned int offset = 0;
+       char *top;
++      dlen ? *dlen = 0 : 0; /* default set to 0 */
+       de = (struct ext4_dir_entry_2 *)buf;
+       top = buf + buf_size - reclen;
+       while ((char *) de <= top) {
+@@ -1966,10 +1980,26 @@ int ext4_find_dest_de(struct inode *dir,
+                       return -EFSCORRUPTED;
+               if (ext4_match(dir, fname, de))
+                       return -EEXIST;
+-              nlen = EXT4_DIR_REC_LEN(de->name_len);
++              nlen = EXT4_DIR_ENTRY_LEN(de);
+               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+               if ((de->inode ? rlen - nlen : rlen) >= reclen)
+                       break;
++              /* Then for dotdot entries, check for the smaller space
++               * required for just the entry, no FID */
++              if (fname_len(fname) == 2 && memcmp(fname_name(fname), "..", 2) == 0) {
++                      if ((de->inode ? rlen - nlen : rlen) >=
++                          EXT4_DIR_REC_LEN(fname_len(fname))) {
++                              /* set dlen=1 to indicate not
++                               * enough space to store the fid */
++                              dlen ? *dlen = 1 : 0;
++                              break;
++                      }
++                      /* The new ".." entry must be written over the
++                       * previous ".." entry, which is the first
++                       * entry traversed by this scan. If it doesn't
++                       * fit, something is badly wrong, so -EIO. */
++                      return -EIO;
++              }
+               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+               offset += rlen;
+       }
+@@ -1983,12 +2013,12 @@ int ext4_find_dest_de(struct inode *dir,
+ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+-                      struct ext4_filename *fname)
++                      struct ext4_filename *fname, void *data)
+ {
+       int nlen, rlen;
+-      nlen = EXT4_DIR_REC_LEN(de->name_len);
++      nlen = EXT4_DIR_ENTRY_LEN(de);
+       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+       if (de->inode) {
+               struct ext4_dir_entry_2 *de1 =
+@@ -2002,6 +2032,11 @@ void ext4_insert_dentry(struct inode *in
+       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
+       de->name_len = fname_len(fname);
+       memcpy(de->name, fname_name(fname), fname_len(fname));
++      if (data) {
++              de->name[fname_len(fname)] = 0;
++              memcpy(&de->name[fname_len(fname) + 1], data, *(char *)data);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+ }
+ /*
+@@ -2019,14 +2054,19 @@ static int add_dirent_to_buf(handle_t *h
+ {
+       unsigned int    blocksize = dir->i_sb->s_blocksize;
+       int             csum_size = 0;
+-      int             err, err2;
++      int             err, err2, dlen = 0;
++      unsigned char   *data;
++      data = ext4_dentry_get_data(inode->i_sb, (struct ext4_dentry_param *)
++                                              EXT4_I(inode)->i_dirdata);
+       if (ext4_has_metadata_csum(inode->i_sb))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+       if (!de) {
++              if (data)
++                      dlen = (*data) + 1;
+               err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+-                                      blocksize - csum_size, fname, &de);
++                                      blocksize - csum_size, fname, &de, &dlen);
+               if (err)
+                       return err;
+       }
+@@ -2038,7 +2078,10 @@ static int add_dirent_to_buf(handle_t *h
+       }
+       /* By now the buffer is marked for journaling */
+-      ext4_insert_dentry(inode, de, blocksize, fname);
++      /* If writing the short form of "dotdot", don't add the data section */
++      if (dlen == 1)
++              data = NULL;
++      ext4_insert_dentry(inode, de, blocksize, fname, data);
+       /*
+        * XXX shouldn't update any times until successful
+@@ -2143,7 +2186,8 @@ static int make_indexed_dir(handle_t *ha
+       dx_set_block(entries, 1);
+       dx_set_count(entries, 1);
+-      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
++      dx_set_limit(entries, dx_root_limit(dir,
++                                       dot_de, sizeof(*dx_info)));
+       /* Initialize as for dx_probe */
+       fname->hinfo.hash_version = dx_info->hash_version;
+@@ -2193,6 +2237,8 @@ static int ext4_update_dotdot(handle_t *
+       struct buffer_head *dir_block;
+       struct ext4_dir_entry_2 *de;
+       int len, journal = 0, err = 0;
++      int dlen = 0;
++      char *data;
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -2218,11 +2264,16 @@ static int ext4_update_dotdot(handle_t *
+                       goto out_journal;
+               journal = 1;
+-              de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(1));
++              de->rec_len = cpu_to_le16(EXT4_DIR_ENTRY_LEN(de));
+       }
+-      len -= EXT4_DIR_REC_LEN(1);
+-      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2));
++      len -= EXT4_DIR_ENTRY_LEN(de);
++      data = ext4_dentry_get_data(dir->i_sb,
++                      (struct ext4_dentry_param *)dentry->d_fsdata);
++      if (data)
++              dlen = *data + 1;
++      assert(len == 0 || len >= EXT4_DIR_REC_LEN(2 + dlen));
++
+       de = (struct ext4_dir_entry_2 *)
+                       ((char *) de + le16_to_cpu(de->rec_len));
+       if (!journal) {
+@@ -2239,7 +2290,12 @@ static int ext4_update_dotdot(handle_t *
+               assert(le16_to_cpu(de->rec_len) >= EXT4_DIR_REC_LEN(2));
+       de->name_len = 2;
+       strcpy(de->name, "..");
+-      ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++      if (data != NULL && ext4_get_dirent_data_len(de) >= dlen) {
++              de->name[2] = 0;
++              memcpy(&de->name[2 + 1], data, *data);
++              ext4_set_de_type(dir->i_sb, de, S_IFDIR);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+ out_journal:
+       if (journal) {
+@@ -2280,6 +2336,7 @@ static int ext4_add_entry(handle_t *hand
+       ext4_lblk_t block, blocks;
+       int     csum_size = 0;
++      EXT4_I(inode)->i_dirdata = dentry->d_fsdata;
+       if (ext4_has_metadata_csum(inode->i_sb))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+@@ -2832,37 +2889,70 @@ err_unlock_inode:
+       return err;
+ }
++struct tp_block {
++      struct inode *inode;
++      void *data1;
++      void *data2;
++};
++
+ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+                         struct ext4_dir_entry_2 *de,
+                         int blocksize, int csum_size,
+                         unsigned int parent_ino, int dotdot_real_len)
+ {
++      void *data1 = NULL, *data2 = NULL;
++      int dot_reclen = 0;
++
++      if (dotdot_real_len == 10) {
++              struct tp_block *tpb = (struct tp_block *)inode;
++              data1 = tpb->data1;
++              data2 = tpb->data2;
++              inode = tpb->inode;
++              dotdot_real_len = 0;
++      }
+       de->inode = cpu_to_le32(inode->i_ino);
+       de->name_len = 1;
+-      de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+-                                         blocksize);
+       strcpy(de->name, ".");
+       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++      /* get packed fid data*/
++      data1 = ext4_dentry_get_data(inode->i_sb,
++                              (struct ext4_dentry_param *) data1);
++      if (data1) {
++              de->name[1] = 0;
++              memcpy(&de->name[2], data1, *(char *) data1);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
++      de->rec_len = cpu_to_le16(EXT4_DIR_ENTRY_LEN(de));
++      dot_reclen = cpu_to_le16(de->rec_len);
+       de = ext4_next_entry(de, blocksize);
+       de->inode = cpu_to_le32(parent_ino);
+       de->name_len = 2;
++      strcpy(de->name, "..");
++      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++      data2 = ext4_dentry_get_data(inode->i_sb,
++                      (struct ext4_dentry_param *) data2);
++      if (data2) {
++              de->name[2] = 0;
++              memcpy(&de->name[3], data2, *(char *) data2);
++              de->file_type |= EXT4_DIRENT_LUFID;
++      }
+       if (!dotdot_real_len)
+               de->rec_len = ext4_rec_len_to_disk(blocksize -
+-                                      (csum_size + EXT4_DIR_REC_LEN(1)),
++                                      (csum_size + dot_reclen),
+                                       blocksize);
+       else
+               de->rec_len = ext4_rec_len_to_disk(
+-                              EXT4_DIR_REC_LEN(de->name_len), blocksize);
+-      strcpy(de->name, "..");
+-      ext4_set_de_type(inode->i_sb, de, S_IFDIR);
++                              EXT4_DIR_ENTRY_LEN(de), blocksize);
+       return ext4_next_entry(de, blocksize);
+ }
+ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+-                           struct inode *inode)
++                           struct inode *inode,
++                           const void *data1, const void *data2)
+ {
++      struct tp_block param;
+       struct buffer_head *dir_block = NULL;
+       struct ext4_dir_entry_2 *de;
+       ext4_lblk_t block = 0;
+@@ -2886,7 +2976,11 @@ static int ext4_init_new_dir(handle_t *h
+       if (IS_ERR(dir_block))
+               return PTR_ERR(dir_block);
+       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+-      ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
++      param.inode = inode;
++      param.data1 = (void *)data1;
++      param.data2 = (void *)data2;
++      ext4_init_dot_dotdot((struct inode *)(&param), de, blocksize,
++                           csum_size, dir->i_ino, 10);
+       set_nlink(inode, 2);
+       if (csum_size)
+               ext4_initialize_dirent_tail(dir_block, blocksize);
+@@ -2901,6 +2995,29 @@ out:
+       return err;
+ }
++/* Initialize @inode as a subdirectory of @dir, and add the
++ * "." and ".." entries into the first directory block. */
++int ext4_add_dot_dotdot(handle_t *handle, struct inode *dir,
++                      struct inode *inode,
++                      const void *data1, const void *data2)
++{
++      int rc;
++
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++
++      if (IS_DIRSYNC(dir))
++              ext4_handle_sync(handle);
++
++      inode->i_op = &ext4_dir_inode_operations;
++      inode->i_fop = &ext4_dir_operations;
++      rc = ext4_init_new_dir(handle, dir, inode, data1, data2);
++      if (!rc)
++              rc = ext4_mark_inode_dirty(handle, inode);
++      return rc;
++}
++EXPORT_SYMBOL(ext4_add_dot_dotdot);
++
+ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+       handle_t *handle;
+@@ -2927,7 +3044,7 @@ retry:
+       inode->i_op = &ext4_dir_inode_operations;
+       inode->i_fop = &ext4_dir_operations;
+-      err = ext4_init_new_dir(handle, dir, inode);
++      err = ext4_init_new_dir(handle, dir, inode, NULL, NULL);
+       if (err)
+               goto out_clear_inode;
+       err = ext4_mark_inode_dirty(handle, inode);
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1510,7 +1510,7 @@ enum {
+       Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
+       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+-      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
++      Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_dirdata,
+       Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
+       Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
+       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+@@ -1590,6 +1590,7 @@ static const match_table_t tokens = {
+       {Opt_nolazytime, "nolazytime"},
+       {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
+       {Opt_nodelalloc, "nodelalloc"},
++      {Opt_dirdata, "dirdata"},
+       {Opt_removed, "mblk_io_submit"},
+       {Opt_removed, "nomblk_io_submit"},
+       {Opt_block_validity, "block_validity"},
+@@ -1822,6 +1823,7 @@ static const struct mount_opts {
+       {Opt_usrjquota, 0, MOPT_Q},
+       {Opt_grpjquota, 0, MOPT_Q},
+       {Opt_offusrjquota, 0, MOPT_Q},
++      {Opt_dirdata, EXT4_MOUNT_DIRDATA, MOPT_SET},
+       {Opt_offgrpjquota, 0, MOPT_Q},
+       {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
+       {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
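
For reference, the caller side of this feature (Lustre's osd layer) passes
the extra payload through dentry->d_fsdata as a struct ext4_dentry_param,
which ext4_add_entry() stashes in EXT4_I(inode)->i_dirdata and
ext4_dentry_get_data() validates against EXT4_LUFID_MAGIC. A hypothetical
caller sketch, with the lu_fid type, buffer handling and lifetime as
illustrative assumptions:

    char buf[sizeof(struct ext4_dentry_param) + sizeof(struct lu_fid)];
    struct ext4_dentry_param *p = (struct ext4_dentry_param *)buf;

    p->edp_magic = EXT4_LUFID_MAGIC;    /* checked by ext4_dentry_get_data() */
    p->edp_len = sizeof(struct lu_fid); /* size of edp_data in bytes */
    memcpy(p->edp_data, &fid, sizeof(struct lu_fid));
    dentry->d_fsdata = p;  /* must stay valid across ext4_add_entry() */

On disk the payload then follows the name and its NUL byte inside the same
dirent, with EXT4_DIRENT_LUFID set in file_type and the section length in
the payload's first byte, exactly as ext4_get_dirent_data_len() walks it.
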
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-export-mb-stream-allocator-variables.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-export-mb-stream-allocator-variables.patch
new file mode 100644 (file)
index 0000000..7ab4fff
--- /dev/null
@@ -0,0 +1,97 @@
+---
+ fs/ext4/ext4.h    |    2 +
+ fs/ext4/mballoc.c |   58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/ext4/sysfs.c   |    4 +++
+ 3 files changed, 64 insertions(+)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2808,6 +2808,8 @@ extern void ext4_end_bitmap_read(struct
+ /* mballoc.c */
+ extern const struct proc_ops ext4_seq_prealloc_table_fops;
+ extern const struct seq_operations ext4_mb_seq_groups_ops;
++extern const struct proc_ops ext4_seq_mb_last_group_fops;
++extern int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v);
+ extern long ext4_mb_stats;
+ extern long ext4_mb_max_to_scan;
+ extern int ext4_mb_init(struct super_block *);
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2574,6 +2574,64 @@ static struct kmem_cache *get_groupinfo_
+       return cachep;
+ }
++#define EXT4_MB_MAX_INPUT_STRING_SIZE 32
++
++static ssize_t ext4_mb_last_group_write(struct file *file,
++                                      const char __user *buf,
++                                      size_t cnt, loff_t *pos)
++{
++      char dummy[EXT4_MB_MAX_INPUT_STRING_SIZE + 1];
++      struct super_block *sb = PDE_DATA(file_inode(file));
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      unsigned long val;
++      char *end;
++
++      if (cnt > EXT4_MB_MAX_INPUT_STRING_SIZE)
++              return -EINVAL;
++      if (copy_from_user(dummy, buf, cnt))
++              return -EFAULT;
++      dummy[cnt] = '\0';
++      val = simple_strtoul(dummy, &end, 0);
++      if (dummy == end)
++              return -EINVAL;
++      if (val >= ext4_get_groups_count(sb))
++              return -ERANGE;
++      spin_lock(&sbi->s_md_lock);
++      sbi->s_mb_last_group = val;
++      sbi->s_mb_last_start = 0;
++      spin_unlock(&sbi->s_md_lock);
++      return cnt;
++}
++
++static int ext4_mb_seq_last_group_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++
++      seq_printf(m , "%ld\n", sbi->s_mb_last_group);
++      return 0;
++}
++
++static int ext4_mb_seq_last_group_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, ext4_mb_seq_last_group_seq_show, PDE_DATA(inode));
++}
++
++const struct proc_ops ext4_seq_mb_last_group_fops = {
++      .proc_open      = ext4_mb_seq_last_group_open,
++      .proc_read      = seq_read,
++      .proc_lseek     = seq_lseek,
++      .proc_release   = seq_release,
++      .proc_write     = ext4_mb_last_group_write,
++};
++
++int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++
++      seq_printf(m , "%ld\n", sbi->s_mb_last_start);
++      return 0;
++}
++
+ /*
+  * Allocate the top-level s_group_info array for the specified number
+  * of groups
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -524,6 +524,10 @@ int ext4_register_sysfs(struct super_blo
+                               &ext4_mb_seq_groups_ops, sb);
+               proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
+                               &ext4_seq_prealloc_table_fops, sb);
++              proc_create_data("mb_last_group", S_IRUGO, sbi->s_proc,
++                              &ext4_seq_mb_last_group_fops, sb);
++              proc_create_single_data("mb_last_start", S_IRUGO, sbi->s_proc,
++                              ext4_mb_seq_last_start_seq_show, sb);
+       }
+       return 0;
+ }
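
The new entries surface under the per-filesystem proc directory,
/proc/fs/ext4/<disk>/: mb_last_group accepts writes (resetting the stream
allocator's starting group) while mb_last_start is read-only. A
hypothetical userspace probe, with the device name as an assumption:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/proc/fs/ext4/sda1/mb_last_group";
            char line[32];
            FILE *f = fopen(path, "r");

            if (f && fgets(line, sizeof(line), f))
                    printf("mb_last_group: %s", line); /* current group */
            if (f)
                    fclose(f);

            f = fopen(path, "w"); /* needs root despite S_IRUGO mode */
            if (f) {
                    fputs("0\n", f); /* restart stream allocation at group 0 */
                    fclose(f);
            }
            return 0;
    }
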
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-kill-dx-root.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-kill-dx-root.patch
new file mode 100644 (file)
index 0000000..ef2a361
--- /dev/null
@@ -0,0 +1,236 @@
+From aa282f628e4ad75eea7f8ee1b26dea920c238241 Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Tue, 6 Aug 2019 17:00:55 -0500
+Subject: [PATCH] + linux-5.3/ext4-kill-dx-root
+
+---
+ fs/ext4/namei.c |  111 +++++++++++++++++++++++++++++---------------------------
+ 1 file changed, 58 insertions(+), 53 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -219,22 +219,13 @@ struct dx_entry
+  * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
+  */
+-struct dx_root
++struct dx_root_info
+ {
+-      struct fake_dirent dot;
+-      char dot_name[4];
+-      struct fake_dirent dotdot;
+-      char dotdot_name[4];
+-      struct dx_root_info
+-      {
+-              __le32 reserved_zero;
+-              u8 hash_version;
+-              u8 info_length; /* 8 */
+-              u8 indirect_levels;
+-              u8 unused_flags;
+-      }
+-      info;
+-      struct dx_entry entries[];
++      __le32 reserved_zero;
++      u8 hash_version;
++      u8 info_length; /* 8 */
++      u8 indirect_levels;
++      u8 unused_flags;
+ };
+ struct dx_node
+@@ -536,6 +527,16 @@ ext4_next_entry(struct ext4_dir_entry_2
+  * Future: use high four bits of block for coalesce-on-delete flags
+  * Mask them off for now.
+  */
++struct dx_root_info *dx_get_dx_info(struct ext4_dir_entry_2 *de)
++{
++      /* get dotdot first */
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(1));
++
++      /* dx root info is after dotdot entry */
++      de = (struct ext4_dir_entry_2 *)((char *)de + EXT4_DIR_REC_LEN(2));
++
++      return (struct dx_root_info *)de;
++}
+ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
+ {
+@@ -760,7 +761,7 @@ dx_probe(struct ext4_filename *fname, st
+ {
+       unsigned count, indirect;
+       struct dx_entry *at, *entries, *p, *q, *m;
+-      struct dx_root *root;
++      struct dx_root_info *info;
+       struct dx_frame *frame = frame_in;
+       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
+       u32 hash;
+@@ -769,18 +770,17 @@ dx_probe(struct ext4_filename *fname, st
+       frame->bh = ext4_read_dirblock(dir, 0, INDEX);
+       if (IS_ERR(frame->bh))
+               return (struct dx_frame *) frame->bh;
+-
+-      root = (struct dx_root *) frame->bh->b_data;
+-      if (root->info.hash_version != DX_HASH_TEA &&
+-          root->info.hash_version != DX_HASH_HALF_MD4 &&
+-          root->info.hash_version != DX_HASH_LEGACY) {
+-              ext4_warning_inode(dir, "Unrecognised inode hash code %u for directory "
+-                                 "%lu", root->info.hash_version, dir->i_ino);
++      info = dx_get_dx_info((struct ext4_dir_entry_2 *)frame->bh->b_data);
++      if (info->hash_version != DX_HASH_TEA &&
++          info->hash_version != DX_HASH_HALF_MD4 &&
++          info->hash_version != DX_HASH_LEGACY) {
++              ext4_warning(dir->i_sb, "Unrecognised inode hash code %d for directory "
++                             "#%lu", info->hash_version, dir->i_ino);
+               goto fail;
+       }
+       if (fname)
+               hinfo = &fname->hinfo;
+-      hinfo->hash_version = root->info.hash_version;
++      hinfo->hash_version = info->hash_version;
+       if (hinfo->hash_version <= DX_HASH_TEA)
+               hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -788,13 +788,13 @@ dx_probe(struct ext4_filename *fname, st
+               ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
+       hash = hinfo->hash;
+-      if (root->info.unused_flags & 1) {
++      if (info->unused_flags & 1) {
+               ext4_warning_inode(dir, "Unimplemented hash flags: %#06x",
+-                                 root->info.unused_flags);
++                                 info->unused_flags);
+               goto fail;
+       }
+-      indirect = root->info.indirect_levels;
++      indirect = info->indirect_levels;
+       if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
+               ext4_warning(dir->i_sb,
+                            "Directory (ino: %lu) htree depth %#06x exceed"
+@@ -807,14 +807,13 @@ dx_probe(struct ext4_filename *fname, st
+               goto fail;
+       }
+-      entries = (struct dx_entry *)(((char *)&root->info) +
+-                                    root->info.info_length);
++      entries = (struct dx_entry *)(((char *)info) + info->info_length);
+       if (dx_get_limit(entries) != dx_root_limit(dir,
+-                                                 root->info.info_length)) {
++                                                 info->info_length)) {
+               ext4_warning_inode(dir, "dx entry: limit %u != root limit %u",
+                                  dx_get_limit(entries),
+-                                 dx_root_limit(dir, root->info.info_length));
++                                 dx_root_limit(dir, info->info_length));
+               goto fail;
+       }
+@@ -899,7 +898,7 @@ static void dx_release(struct dx_frame *
+       if (frames[0].bh == NULL)
+               return;
+-      info = &((struct dx_root *)frames[0].bh->b_data)->info;
++      info = dx_get_dx_info((struct ext4_dir_entry_2 *)frames[0].bh->b_data);
+       /* save local copy, "info" may be freed after brelse() */
+       indirect_levels = info->indirect_levels;
+       for (i = 0; i <= indirect_levels; i++) {
+@@ -2072,16 +2071,15 @@ static int make_indexed_dir(handle_t *ha
+                           struct inode *inode, struct buffer_head *bh)
+ {
+       struct buffer_head *bh2;
+-      struct dx_root  *root;
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+       struct dx_entry *entries;
+-      struct ext4_dir_entry_2 *de, *de2;
++      struct ext4_dir_entry_2 *de, *de2, *dot_de, *dotdot_de;
+       char            *data2, *top;
+       unsigned        len;
+       int             retval;
+       unsigned        blocksize;
+       ext4_lblk_t  block;
+-      struct fake_dirent *fde;
++      struct dx_root_info *dx_info;
+       int csum_size = 0;
+       if (ext4_has_metadata_csum(inode->i_sb))
+@@ -2096,18 +2094,19 @@ static int make_indexed_dir(handle_t *ha
+               brelse(bh);
+               return retval;
+       }
+-      root = (struct dx_root *) bh->b_data;
++
++      dot_de = (struct ext4_dir_entry_2 *)bh->b_data;
++      dotdot_de = ext4_next_entry(dot_de, blocksize);
+       /* The 0th block becomes the root, move the dirents out */
+-      fde = &root->dotdot;
+-      de = (struct ext4_dir_entry_2 *)((char *)fde +
+-              ext4_rec_len_from_disk(fde->rec_len, blocksize));
+-      if ((char *) de >= (((char *) root) + blocksize)) {
++      de = (struct ext4_dir_entry_2 *)((char *)dotdot_de +
++              ext4_rec_len_from_disk(dotdot_de->rec_len, blocksize));
++      if ((char *)de >= (((char *)dot_de) + blocksize)) {
+               EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+               brelse(bh);
+               return -EFSCORRUPTED;
+       }
+-      len = ((char *) root) + (blocksize - csum_size) - (char *) de;
++      len = ((char *)dot_de) + (blocksize - csum_size) - (char *)de;
+       /* Allocate new block for the 0th block's dirents */
+       bh2 = ext4_append(handle, dir, &block);
+@@ -2130,19 +2129,24 @@ static int make_indexed_dir(handle_t *ha
+               ext4_initialize_dirent_tail(bh2, blocksize);
+       /* Initialize the root; the dot dirents already exist */
+-      de = (struct ext4_dir_entry_2 *) (&root->dotdot);
+-      de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
+-                                         blocksize);
+-      memset (&root->info, 0, sizeof(root->info));
+-      root->info.info_length = sizeof(root->info);
+-      root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+-      entries = root->entries;
++      dotdot_de->rec_len =
++              ext4_rec_len_to_disk(blocksize - le16_to_cpu(dot_de->rec_len),
++                                   blocksize);
++
++      /* initialize hashing info */
++      dx_info = dx_get_dx_info(dot_de);
++      memset(dx_info, 0, sizeof(*dx_info));
++      dx_info->info_length = sizeof(*dx_info);
++      dx_info->hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++
++      entries = (void *)dx_info + sizeof(*dx_info);
++
+       dx_set_block(entries, 1);
+       dx_set_count(entries, 1);
+-      dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
++      dx_set_limit(entries, dx_root_limit(dir, sizeof(*dx_info)));
+       /* Initialize as for dx_probe */
+-      fname->hinfo.hash_version = root->info.hash_version;
++      fname->hinfo.hash_version = dx_info->hash_version;
+       if (fname->hinfo.hash_version <= DX_HASH_TEA)
+               fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+@@ -2506,7 +2510,7 @@ again:
+                               goto journal_error;
+                       }
+               } else {
+-                      struct dx_root *dxroot;
++                      struct dx_root_info *info;
+                       memcpy((char *) entries2, (char *) entries,
+                              icount * sizeof(struct dx_entry));
+                       dx_set_limit(entries2, dx_node_limit(dir));
+@@ -2514,8 +2518,9 @@ again:
+                       /* Set up root */
+                       dx_set_count(entries, 1);
+                       dx_set_block(entries + 0, newblock);
+-                      dxroot = (struct dx_root *)frames[0].bh->b_data;
+-                      dxroot->info.indirect_levels += 1;
++                      info = dx_get_dx_info((struct ext4_dir_entry_2 *)
++                                            frames[0].bh->b_data);
++                      info->indirect_levels = 1;
+                       dxtrace(printk(KERN_DEBUG
+                                      "Creating %d level index...\n",
+-                                     dxroot->info.indirect_levels));
++                                     info->indirect_levels));
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-max-dir-size.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-max-dir-size.patch
new file mode 100644 (file)
index 0000000..efdc87c
--- /dev/null
@@ -0,0 +1,46 @@
+Add a proc interface for max_dir_size.
+
+---
+ fs/ext4/sysfs.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -209,6 +209,8 @@ EXT4_ATTR_FUNC(reserved_clusters, 0644);
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+                ext4_sb_info, s_inode_readahead_blks);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
++EXT4_RW_ATTR_SBI_UI(max_dir_size, s_max_dir_size_kb);
++EXT4_RW_ATTR_SBI_UI(max_dir_size_kb, s_max_dir_size_kb);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+@@ -252,6 +254,8 @@ static struct attribute *ext4_attrs[] =
+       ATTR_LIST(reserved_clusters),
+       ATTR_LIST(inode_readahead_blks),
+       ATTR_LIST(inode_goal),
++      ATTR_LIST(max_dir_size),
++      ATTR_LIST(max_dir_size_kb),
+       ATTR_LIST(mb_stats),
+       ATTR_LIST(mb_max_to_scan),
+       ATTR_LIST(mb_min_to_scan),
+@@ -376,7 +380,9 @@ static ssize_t ext4_attr_show(struct kob
+                                       le32_to_cpup(ptr));
+               else
+                       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                                      *((unsigned int *) ptr));
++                                      strcmp("max_dir_size", a->attr.name) ?
++                                      *((unsigned int *) ptr) :
++                                      (*((unsigned int *) ptr)) << 10);
+       case attr_pointer_ul:
+               if (!ptr)
+                       return 0;
+@@ -439,6 +445,8 @@ static ssize_t ext4_attr_store(struct ko
+               ret = kstrtoul(skip_spaces(buf), 0, &t);
+               if (ret)
+                       return ret;
++              if (strcmp("max_dir_size", a->attr.name) == 0)
++                      t >>= 10;
+               if (a->attr_ptr == ptr_ext4_super_block_offset)
+                       *((__le32 *) ptr) = cpu_to_le32(t);
+               else
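
Both attributes added above are backed by the same s_max_dir_size_kb field:
max_dir_size_kb stores and shows the value in KB, while max_dir_size shifts by
10 on store and on show to present bytes. A tiny standalone sketch of that
conversion, with hypothetical values:

    /* Hedged sketch of the byte<->KB conversion performed by the
     * max_dir_size / max_dir_size_kb attributes above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int s_max_dir_size_kb;
            unsigned long bytes = 1048576;   /* echo 1048576 > max_dir_size */

            s_max_dir_size_kb = bytes >> 10;                /* stored: 1024 */

            printf("max_dir_size_kb = %u\n", s_max_dir_size_kb);       /* 1024 */
            printf("max_dir_size    = %u\n", s_max_dir_size_kb << 10); /* 1048576 */
            return 0;
    }
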
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-extra-checks.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-extra-checks.patch
new file mode 100644 (file)
index 0000000..d43f4a1
--- /dev/null
@@ -0,0 +1,301 @@
+---
+ fs/ext4/ext4.h    |    1 
+ fs/ext4/mballoc.c |  103 +++++++++++++++++++++++++++++++++++++++++++++++-------
+ fs/ext4/mballoc.h |    2 -
+ 3 files changed, 93 insertions(+), 13 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3156,6 +3156,7 @@ struct ext4_group_info {
+       ext4_grpblk_t   bb_fragments;   /* nr of freespace fragments */
+       ext4_grpblk_t   bb_largest_free_order;/* order of largest frag in BG */
+       struct          list_head bb_prealloc_list;
++      unsigned long   bb_prealloc_nr;
+ #ifdef DOUBLE_CHECK
+       void            *bb_bitmap;
+ #endif
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -345,7 +345,7 @@ static const char * const ext4_groupinfo
+       "ext4_groupinfo_64k", "ext4_groupinfo_128k"
+ };
+-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group);
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+                                               ext4_group_t group);
+@@ -769,7 +769,7 @@ mb_set_largest_free_order(struct super_b
+ }
+ static noinline_for_stack
+-void ext4_mb_generate_buddy(struct super_block *sb,
++int ext4_mb_generate_buddy(struct super_block *sb,
+                               void *buddy, void *bitmap, ext4_group_t group)
+ {
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+@@ -813,6 +813,7 @@ void ext4_mb_generate_buddy(struct super
+               grp->bb_free = free;
+               ext4_mark_group_bitmap_corrupted(sb, group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++              return -EIO;
+       }
+       mb_set_largest_free_order(sb, grp);
+@@ -823,6 +824,8 @@ void ext4_mb_generate_buddy(struct super
+       sbi->s_mb_buddies_generated++;
+       sbi->s_mb_generation_time += period;
+       spin_unlock(&sbi->s_bal_lock);
++
++      return 0;
+ }
+ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+@@ -943,7 +946,7 @@ static int ext4_mb_init_cache(struct pag
+       }
+       first_block = page->index * blocks_per_page;
+-      for (i = 0; i < blocks_per_page; i++) {
++      for (i = 0; i < blocks_per_page && err == 0; i++) {
+               group = (first_block + i) >> 1;
+               if (group >= ngroups)
+                       break;
+@@ -987,7 +990,7 @@ static int ext4_mb_init_cache(struct pag
+                       ext4_lock_group(sb, group);
+                       /* init the buddy */
+                       memset(data, 0xff, blocksize);
+-                      ext4_mb_generate_buddy(sb, data, incore, group);
++                      err = ext4_mb_generate_buddy(sb, data, incore, group);
+                       ext4_unlock_group(sb, group);
+                       incore = NULL;
+               } else {
+@@ -1002,7 +1005,7 @@ static int ext4_mb_init_cache(struct pag
+                       memcpy(data, bitmap, blocksize);
+                       /* mark all preallocated blks used in in-core bitmap */
+-                      ext4_mb_generate_from_pa(sb, data, group);
++                      err = ext4_mb_generate_from_pa(sb, data, group);
+                       ext4_mb_generate_from_freelist(sb, data, group);
+                       ext4_unlock_group(sb, group);
+@@ -1012,7 +1015,8 @@ static int ext4_mb_init_cache(struct pag
+                       incore = data;
+               }
+       }
+-      SetPageUptodate(page);
++      if (likely(err == 0))
++              SetPageUptodate(page);
+ out:
+       if (bh) {
+@@ -2396,9 +2400,11 @@ static void *ext4_mb_seq_groups_next(str
+ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ {
+       struct super_block *sb = PDE_DATA(file_inode(seq->file));
++      struct ext4_group_desc *gdp;
+       ext4_group_t group = (ext4_group_t) ((unsigned long) v);
+       int i;
+       int err, buddy_loaded = 0;
++      int free = 0;
+       struct ext4_buddy e4b;
+       struct ext4_group_info *grinfo;
+       unsigned char blocksize_bits = min_t(unsigned char,
+@@ -2411,7 +2417,7 @@ static int ext4_mb_seq_groups_show(struc
+       group--;
+       if (group == 0)
+-              seq_puts(seq, "#group: free  frags first ["
++              seq_puts(seq, "#group: bfree gfree frags first pa    ["
+                             " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
+                             " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
+@@ -2429,13 +2435,19 @@ static int ext4_mb_seq_groups_show(struc
+               buddy_loaded = 1;
+       }
++      gdp = ext4_get_group_desc(sb, group, NULL);
++      if (gdp != NULL)
++              free = ext4_free_group_clusters(sb, gdp);
++
+       memcpy(&sg, ext4_get_group_info(sb, group), i);
+       if (buddy_loaded)
+               ext4_mb_unload_buddy(&e4b);
+-      seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
+-                      sg.info.bb_fragments, sg.info.bb_first_free);
++      seq_printf(seq, "#%-5lu: %-5u %-5u %-5u %-5u %-5lu [",
++                      (long unsigned int)group, sg.info.bb_free, free,
++                      sg.info.bb_fragments, sg.info.bb_first_free,
++                      sg.info.bb_prealloc_nr);
+       for (i = 0; i <= 13; i++)
+               seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
+                               sg.info.bb_counters[i] : 0);
+@@ -3742,22 +3754,71 @@ static void ext4_mb_generate_from_freeli
+ }
+ /*
++ * check that free blocks in the bitmap match the free block count in the
++ * group descriptor. Do this before taking preallocated blocks into account,
++ * to be able to detect on-disk corruption. The group lock should be held
++ * by the caller.
++ */
++int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
++                              struct ext4_group_desc *gdp, int group)
++{
++      unsigned short max = EXT4_CLUSTERS_PER_GROUP(sb);
++      unsigned short i, first, free = 0;
++      unsigned short free_in_gdp = ext4_free_group_clusters(sb, gdp);
++
++      if (free_in_gdp == 0 && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
++              return 0;
++
++      i = mb_find_next_zero_bit(bitmap, max, 0);
++
++      while (i < max) {
++              first = i;
++              i = mb_find_next_bit(bitmap, max, i);
++              if (i > max)
++                      i = max;
++              free += i - first;
++              if (i < max)
++                      i = mb_find_next_zero_bit(bitmap, max, i);
++      }
++
++      if (free != free_in_gdp) {
++              ext4_error(sb, "on-disk bitmap for group %d "
++                      "corrupted: %u blocks free in bitmap, %u in gd\n",
++                      group, free, free_in_gdp);
++              return -EIO;
++      }
++      return 0;
++}
++
++/*
+  * the function goes through all preallocation in this group and marks them
+  * used in in-core bitmap. buddy must be generated from this bitmap
+  * Need to be called with ext4 group lock held
+  */
+ static noinline_for_stack
+-void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group)
+ {
+       struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+       struct ext4_prealloc_space *pa;
++      struct ext4_group_desc *gdp;
+       struct list_head *cur;
+       ext4_group_t groupnr;
+       ext4_grpblk_t start;
+       int preallocated = 0;
++      int skip = 0, count = 0;
++      int err;
+       int len;
++      gdp = ext4_get_group_desc(sb, group, NULL);
++      if (gdp == NULL)
++              return -EIO;
++
++      /* before applying preallocations, check bitmap consistency */
++      err = ext4_mb_check_ondisk_bitmap(sb, bitmap, gdp, group);
++      if (err)
++              return err;
++
+       /* all form of preallocation discards first load group,
+        * so the only competing code is preallocation use.
+        * we don't need any locking here
+@@ -3773,13 +3834,23 @@ void ext4_mb_generate_from_pa(struct sup
+                                            &groupnr, &start);
+               len = pa->pa_len;
+               spin_unlock(&pa->pa_lock);
+-              if (unlikely(len == 0))
++              if (unlikely(len == 0)) {
++                      skip++;
+                       continue;
++              }
+               BUG_ON(groupnr != group);
+               ext4_set_bits(bitmap, start, len);
+               preallocated += len;
++              count++;
++      }
++      if (count + skip != grp->bb_prealloc_nr) {
++              ext4_error(sb, "lost preallocations: "
++                         "count %d, bb_prealloc_nr %lu, skip %d\n",
++                         count, grp->bb_prealloc_nr, skip);
++              return -EIO;
+       }
+       mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
++      return 0;
+ }
+ static void ext4_mb_pa_callback(struct rcu_head *head)
+@@ -3843,6 +3914,7 @@ static void ext4_mb_put_pa(struct ext4_a
+        */
+       ext4_lock_group(sb, grp);
+       list_del(&pa->pa_group_list);
++      ext4_get_group_info(sb, grp)->bb_prealloc_nr--;
+       ext4_unlock_group(sb, grp);
+       spin_lock(pa->pa_obj_lock);
+@@ -3934,6 +4006,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+       pa->pa_inode = ac->ac_inode;
+       list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++      grp->bb_prealloc_nr++;
+       spin_lock(pa->pa_obj_lock);
+       list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
+@@ -3988,6 +4061,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+       pa->pa_inode = NULL;
+       list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
++      grp->bb_prealloc_nr++;
+       /*
+        * We will later add the new pa to the right bucket
+@@ -4155,6 +4229,8 @@ repeat:
+               spin_unlock(&pa->pa_lock);
++              BUG_ON(grp->bb_prealloc_nr == 0);
++              grp->bb_prealloc_nr--;
+               list_del(&pa->pa_group_list);
+               list_add(&pa->u.pa_tmp_list, &list);
+       }
+@@ -4291,7 +4367,7 @@ repeat:
+               if (err) {
+                       ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+                                      err, group);
+-                      continue;
++                      return;
+               }
+               bitmap_bh = ext4_read_block_bitmap(sb, group);
+@@ -4304,6 +4380,8 @@ repeat:
+               }
+               ext4_lock_group(sb, group);
++              BUG_ON(e4b.bd_info->bb_prealloc_nr == 0);
++              e4b.bd_info->bb_prealloc_nr--;
+               list_del(&pa->pa_group_list);
+               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
+               ext4_unlock_group(sb, group);
+@@ -4598,6 +4676,7 @@ ext4_mb_discard_lg_preallocations(struct
+               }
+               ext4_lock_group(sb, group);
+               list_del(&pa->pa_group_list);
++              ext4_get_group_info(sb, group)->bb_prealloc_nr--;
+               ext4_mb_release_group_pa(&e4b, pa);
+               ext4_unlock_group(sb, group);
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -66,7 +66,7 @@
+ /*
+  * for which requests use 2^N search using buddies
+  */
+-#define MB_DEFAULT_ORDER2_REQS                2
++#define MB_DEFAULT_ORDER2_REQS                8
+ /*
+  * default group prealloc size 512 blocks
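
The ext4_mb_check_ondisk_bitmap() helper added above implements the check its
comment describes: count the zero (free) bits in the block bitmap and compare
the total with the free-cluster count from the group descriptor. Below is a
standalone toy model of that scan; the byte-array bitmap and test_bit() helper
are plain C stand-ins, not the kernel's mb_find_next_*_bit() API.

    /* Hedged toy model of ext4_mb_check_ondisk_bitmap(): count free (zero)
     * bits in a block bitmap and compare with the descriptor's count. */
    #include <stdio.h>
    #include <stdint.h>

    static int test_bit(const uint8_t *map, unsigned int i)
    {
            return (map[i / 8] >> (i % 8)) & 1;
    }

    /* returns 0 when consistent, -1 when bitmap and descriptor disagree */
    static int check_bitmap(const uint8_t *bitmap, unsigned int max,
                            unsigned int free_in_gdp)
    {
            unsigned int i, free = 0;

            for (i = 0; i < max; i++)
                    if (!test_bit(bitmap, i))   /* zero bit == free cluster */
                            free++;

            if (free != free_in_gdp) {
                    fprintf(stderr, "bitmap says %u free, gd says %u\n",
                            free, free_in_gdp);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t bitmap[4] = { 0x0f, 0xff, 0x00, 0xff }; /* 12 free of 32 */

            return check_bitmap(bitmap, 32, 12) ? 1 : 0;
    }
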
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-pa-free-mismatch.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-mballoc-pa-free-mismatch.patch
new file mode 100644 (file)
index 0000000..be5ec6f
--- /dev/null
@@ -0,0 +1,111 @@
+---
+ fs/ext4/mballoc.c |   43 +++++++++++++++++++++++++++++++++++++------
+ fs/ext4/mballoc.h |    2 ++
+ 2 files changed, 39 insertions(+), 6 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3992,6 +3992,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+       INIT_LIST_HEAD(&pa->pa_group_list);
+       pa->pa_deleted = 0;
+       pa->pa_type = MB_INODE_PA;
++      pa->pa_error = 0;
+       mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+                pa->pa_len, pa->pa_lstart);
+@@ -4046,6 +4047,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+       INIT_LIST_HEAD(&pa->pa_group_list);
+       pa->pa_deleted = 0;
+       pa->pa_type = MB_GROUP_PA;
++      pa->pa_error = 0;
+       mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+                pa->pa_len, pa->pa_lstart);
+@@ -4098,7 +4100,9 @@ ext4_mb_release_inode_pa(struct ext4_bud
+       unsigned long long grp_blk_start;
+       int free = 0;
++      assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+       BUG_ON(pa->pa_deleted == 0);
++      BUG_ON(pa->pa_inode == NULL);
+       ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+       grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
+       BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+@@ -4121,12 +4125,18 @@ ext4_mb_release_inode_pa(struct ext4_bud
+               mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
+               bit = next + 1;
+       }
+-      if (free != pa->pa_free) {
+-              ext4_msg(e4b->bd_sb, KERN_CRIT,
+-                       "pa %p: logic %lu, phys. %lu, len %d",
+-                       pa, (unsigned long) pa->pa_lstart,
+-                       (unsigned long) pa->pa_pstart,
+-                       pa->pa_len);
++
++      /* "free < pa->pa_free" means we maybe double alloc the same blocks,
++       * otherwise maybe leave some free blocks unavailable, no need to BUG.*/
++      if ((free > pa->pa_free && !pa->pa_error) || (free < pa->pa_free)) {
++              ext4_error(sb, "pa free mismatch: [pa %p] "
++                              "[phy %lu] [logic %lu] [len %u] [free %u] "
++                              "[error %u] [inode %d] [freed %u]", pa,
++                              (unsigned long)pa->pa_pstart,
++                              (unsigned long)pa->pa_lstart,
++                              pa->pa_len, (unsigned)pa->pa_free,
++                              (unsigned)pa->pa_error, pa->pa_inode->i_ino,
++                              free);
+               ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
+                                       free, pa->pa_free);
+               /*
+@@ -4134,6 +4144,8 @@ ext4_mb_release_inode_pa(struct ext4_bud
+                * from the bitmap and continue.
+                */
+       }
++      /* do not verify if the file system is being umounted */
++      BUG_ON(atomic_read(&sb->s_active) > 0 && pa->pa_free != free);
+       atomic_add(free, &sbi->s_mb_discarded);
+       return 0;
+@@ -4955,6 +4967,25 @@ errout:
+               ac->ac_b_ex.fe_len = 0;
+               ar->len = 0;
+               ext4_mb_show_ac(ac);
++              if (ac->ac_pa) {
++                      struct ext4_prealloc_space *pa = ac->ac_pa;
++
++                      /* We cannot tell whether the bitmap has been
++                       * updated or not in the failure case, so we
++                       * cannot revert pa_free; just mark pa_error. */
++                      pa->pa_error++;
++                      ext4_error(sb,
++                              "Updating bitmap error: [err %d] "
++                              "[pa %p] [phy %lu] [logic %lu] "
++                              "[len %u] [free %u] [error %u] "
++                              "[inode %lu]", *errp, pa,
++                              (unsigned long)pa->pa_pstart,
++                              (unsigned long)pa->pa_lstart,
++                              (unsigned)pa->pa_len,
++                              (unsigned)pa->pa_free,
++                              (unsigned)pa->pa_error,
++                              pa->pa_inode ? pa->pa_inode->i_ino : 0);
++              }
+       }
+       ext4_mb_release_context(ac);
+ out:
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -20,6 +20,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/blkdev.h>
+ #include <linux/mutex.h>
++#include <linux/genhd.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+@@ -107,6 +108,7 @@ struct ext4_prealloc_space {
+       ext4_grpblk_t           pa_len;         /* len of preallocated chunk */
+       ext4_grpblk_t           pa_free;        /* how many blocks are free */
+       unsigned short          pa_type;        /* pa type. inode or group */
++      unsigned short          pa_error;
+       spinlock_t              *pa_obj_lock;
+       struct inode            *pa_inode;      /* hack, for history only */
+ };
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-misc.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-misc.patch
new file mode 100644 (file)
index 0000000..56d0db3
--- /dev/null
@@ -0,0 +1,187 @@
+---
+ fs/ext4/ext4.h   |   23 ++++++++++++++++++++++-
+ fs/ext4/ialloc.c |    3 ++-
+ fs/ext4/inode.c  |   15 +++++++++++++++
+ fs/ext4/namei.c  |    9 ++++++---
+ fs/ext4/super.c  |   10 ++--------
+ 5 files changed, 47 insertions(+), 13 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1759,6 +1759,8 @@ static inline bool ext4_verity_in_progre
+ #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
++#define JOURNAL_START_HAS_3ARGS       1
++
+ /*
+  * Codes for operating systems
+  */
+@@ -1990,7 +1992,21 @@ static inline bool ext4_has_unknown_ext#
+ EXTN_FEATURE_FUNCS(2)
+ EXTN_FEATURE_FUNCS(3)
+-EXTN_FEATURE_FUNCS(4)
++static inline bool ext4_has_unknown_ext4_compat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_compat &
++              cpu_to_le32(~EXT4_FEATURE_COMPAT_SUPP)) != 0);
++}
++static inline bool ext4_has_unknown_ext4_ro_compat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_ro_compat &
++              cpu_to_le32(~EXT4_FEATURE_RO_COMPAT_SUPP)) != 0);
++}
++static inline bool ext4_has_unknown_ext4_incompat_features(struct super_block *sb)
++{
++      return ((EXT4_SB(sb)->s_es->s_feature_incompat &
++              cpu_to_le32(~EXT4_FEATURE_INCOMPAT_SUPP)) != 0);
++}
+ static inline bool ext4_has_compat_features(struct super_block *sb)
+ {
+@@ -3393,6 +3409,11 @@ struct ext4_extent;
+ #define EXT_MAX_BLOCKS        0xffffffff
+ extern void ext4_ext_tree_init(handle_t *handle, struct inode *inode);
++extern struct buffer_head *ext4_read_inode_bitmap(struct super_block *sb,
++                                                ext4_group_t block_group);
++extern struct buffer_head *ext4_append(handle_t *handle,
++                                     struct inode *inode,
++                                     ext4_lblk_t *block);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
+ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+                              struct ext4_map_blocks *map, int flags);
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -115,7 +115,7 @@ verified:
+  *
+  * Return buffer_head of bitmap on success, or an ERR_PTR on error.
+  */
+-static struct buffer_head *
++struct buffer_head *
+ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ {
+       struct ext4_group_desc *desc;
+@@ -213,6 +213,7 @@ out:
+       put_bh(bh);
+       return ERR_PTR(err);
+ }
++EXPORT_SYMBOL(ext4_read_inode_bitmap);
+ /*
+  * NOTE! When we get the inode, we're the only people
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -6065,3 +6065,18 @@ vm_fault_t ext4_filemap_fault(struct vm_
+       return ret;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++EXPORT_SYMBOL(ext4_truncate);
++EXPORT_SYMBOL(ext4_iget);
++EXPORT_SYMBOL(ext4_bread);
++EXPORT_SYMBOL(ext4_itable_unused_count);
++EXPORT_SYMBOL(ext4_force_commit);
++EXPORT_SYMBOL(__ext4_mark_inode_dirty);
++EXPORT_SYMBOL(ext4_get_group_desc);
++EXPORT_SYMBOL(__ext4_journal_get_write_access);
++EXPORT_SYMBOL(__ext4_journal_start_sb);
++EXPORT_SYMBOL(__ext4_journal_stop);
++EXPORT_SYMBOL(__ext4_handle_dirty_metadata);
++EXPORT_SYMBOL(__ext4_std_error);
++EXPORT_SYMBOL(ext4fs_dirhash);
++EXPORT_SYMBOL(ext4_get_inode_loc);
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -50,7 +50,7 @@
+ #define NAMEI_RA_BLOCKS  4
+ #define NAMEI_RA_SIZE      (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+-static struct buffer_head *ext4_append(handle_t *handle,
++struct buffer_head *ext4_append(handle_t *handle,
+                                       struct inode *inode,
+                                       ext4_lblk_t *block)
+ {
+@@ -181,6 +181,7 @@ static struct buffer_head *__ext4_read_d
+       }
+       return bh;
+ }
++EXPORT_SYMBOL(ext4_append);
+ #ifndef assert
+ #define assert(test) J_ASSERT(test)
+@@ -2572,23 +2573,25 @@ EXPORT_SYMBOL(ext4_delete_entry);
+  * for checking S_ISDIR(inode) (since the INODE_INDEX feature will not be set
+  * on regular files) and to avoid creating huge/slow non-HTREE directories.
+  */
+-static void ext4_inc_count(handle_t *handle, struct inode *inode)
++void ext4_inc_count(handle_t *handle, struct inode *inode)
+ {
+       inc_nlink(inode);
+       if (is_dx(inode) &&
+           (inode->i_nlink > EXT4_LINK_MAX || inode->i_nlink == 2))
+               set_nlink(inode, 1);
+ }
++EXPORT_SYMBOL(ext4_inc_count);
+ /*
+  * If a directory had nlink == 1, then we should let it be 1. This indicates
+  * directory has >EXT4_LINK_MAX subdirs.
+  */
+-static void ext4_dec_count(handle_t *handle, struct inode *inode)
++void ext4_dec_count(handle_t *handle, struct inode *inode)
+ {
+       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
+               drop_nlink(inode);
+ }
++EXPORT_SYMBOL(ext4_dec_count);
+ /*
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -347,7 +347,7 @@ static void __save_error_info(struct sup
+               return;
+       es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+       ext4_update_tstamp(es, s_last_error_time);
+-      strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
++      strlcpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
+       es->s_last_error_line = cpu_to_le32(line);
+       es->s_last_error_ino = cpu_to_le32(ino);
+       es->s_last_error_block = cpu_to_le64(block);
+@@ -408,7 +408,7 @@ static void __save_error_info(struct sup
+       if (!es->s_first_error_time) {
+               es->s_first_error_time = es->s_last_error_time;
+               es->s_first_error_time_hi = es->s_last_error_time_hi;
+-              strncpy(es->s_first_error_func, func,
++              strlcpy(es->s_first_error_func, func,
+                       sizeof(es->s_first_error_func));
+               es->s_first_error_line = cpu_to_le32(line);
+               es->s_first_error_ino = es->s_last_error_ino;
+@@ -6315,16 +6315,12 @@ static int __init ext4_init_fs(void)
+       err = init_inodecache();
+       if (err)
+               goto out1;
+-      register_as_ext3();
+-      register_as_ext2();
+       err = register_filesystem(&ext4_fs_type);
+       if (err)
+               goto out;
+       return 0;
+ out:
+-      unregister_as_ext2();
+-      unregister_as_ext3();
+       destroy_inodecache();
+ out1:
+       ext4_exit_mballoc();
+@@ -6347,8 +6343,6 @@ out7:
+ static void __exit ext4_exit_fs(void)
+ {
+       ext4_destroy_lazyinit_thread();
+-      unregister_as_ext2();
+-      unregister_as_ext3();
+       unregister_filesystem(&ext4_fs_type);
+       destroy_inodecache();
+       ext4_exit_mballoc();
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-no-max-dir-size-limit-for-iam-objects.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-no-max-dir-size-limit-for-iam-objects.patch
new file mode 100644 (file)
index 0000000..cd28c6b
--- /dev/null
@@ -0,0 +1,30 @@
+---
+ fs/ext4/ext4.h  |    1 +
+ fs/ext4/namei.c |    6 ++++--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1716,6 +1716,7 @@ enum {
+       EXT4_STATE_NO_EXPAND,           /* No space for expansion */
+       EXT4_STATE_DA_ALLOC_CLOSE,      /* Alloc DA blks on close */
+       EXT4_STATE_EXT_MIGRATE,         /* Inode is migrating */
++      EXT4_STATE_IAM,                 /* Lustre IAM objects */
+       EXT4_STATE_NEWENTRY,            /* File just added to dir */
+       EXT4_STATE_MAY_INLINE_DATA,     /* may have in-inode data */
+       EXT4_STATE_EXT_PRECACHED,       /* extents have been precached */
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -60,8 +60,10 @@ struct buffer_head *ext4_append(handle_t
+       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+                    ((inode->i_size >> 10) >=
+-                    EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+-              return ERR_PTR(-ENOSPC);
++                    EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) {
++              if (!ext4_test_inode_state(inode, EXT4_STATE_IAM))
++                      return ERR_PTR(-ENOSPC);
++      }
+       /* with parallel dir operations all appends
+       * have to be serialized -bzzz */
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-pdirop.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-pdirop.patch
new file mode 100644 (file)
index 0000000..73e4bcc
--- /dev/null
@@ -0,0 +1,890 @@
+From 1a0f7f0b9c13ef0aa86e125f350b6733bff8db3c Mon Sep 17 00:00:00 2001
+From: Shaun Tancheff <stancheff@cray.com>
+Date: Wed, 15 Jan 2020 07:35:13 -0600
+Subject: [PATCH] Single directory performance is critical for HPC workloads.
+ In a typical use case an application creates a separate output file for each
+ node and task in a job. As nodes and tasks increase, hundreds of thousands of
+ files may be created in a single directory within a short window of time.
+ Today, both filename lookup and file system modifying operations (such as
+ create and unlink) are protected by a single lock for an entire ldiskfs
+ directory. The PDO project removes this bottleneck by introducing a parallel
+ locking mechanism for entire ldiskfs directories, enabling multiple
+ application threads to look up, create and unlink in parallel.
+
+This patch contains:
+ - pdirops support for ldiskfs
+ - integrate with osd-ldiskfs
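
A hedged sketch of how a caller such as osd-ldiskfs might drive the locking
API this patch introduces, using only the ext4_htree_* entry points declared
in the ext4.h hunk below; the directory inode, name, and per-directory lock
head are assumed to be managed by the caller, and error handling is
abbreviated. This is kernel-context illustration, not part of the patch.

    /* Hedged caller-side sketch of the pdirops API added by this patch.
     * dir, name and lhead are assumed to be supplied by the caller. */
    static struct buffer_head *pdo_lookup_example(struct inode *dir,
                                                  const struct qstr *name,
                                                  struct htree_lock_head *lhead)
    {
            struct ext4_dir_entry_2 *de;
            struct buffer_head *bh;
            struct htree_lock *lck;

            lck = ext4_htree_lock_alloc();
            if (!lck)
                    return ERR_PTR(-ENOMEM);

            /* shared lookup: other threads may create/unlink concurrently */
            ext4_htree_lock(lck, lhead, dir, EXT4_HLOCK_LOOKUP);
            bh = ext4_find_entry_locked(dir, name, &de, NULL, lck);
            ext4_htree_unlock(lck);

            ext4_htree_lock_free(lck);
            return bh;
    }

The lock head itself would be allocated once per directory with
ext4_htree_lock_head_alloc() and released with ext4_htree_lock_head_free();
passing EXT4_HLOCK_ADD or EXT4_HLOCK_DEL instead of EXT4_HLOCK_LOOKUP takes
the write-side (CW) mode, as ext4_htree_mode() in the namei.c hunk shows.
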
+---
+ fs/ext4/Makefile |    1 
+ fs/ext4/ext4.h   |   78 +++++++++
+ fs/ext4/namei.c  |  454 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
+ fs/ext4/super.c  |    1 
+ 4 files changed, 494 insertions(+), 40 deletions(-)
+ create mode 100644 fs/ext4/htree_lock.c
+ create mode 100644 include/linux/htree_lock.h
+
+--- a/fs/ext4/Makefile
++++ b/fs/ext4/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
+ ext4-y        := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
+               extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
++              htree_lock.o \
+               indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
+               mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
+               super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -29,6 +29,7 @@
+ #include <linux/timer.h>
+ #include <linux/version.h>
+ #include <linux/wait.h>
++#include <linux/htree_lock.h>
+ #include <linux/sched/signal.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+@@ -987,6 +988,9 @@ struct ext4_inode_info {
+       __u32   i_dtime;
+       ext4_fsblk_t    i_file_acl;
++      /* following fields for parallel directory operations -bzzz */
++      struct semaphore i_append_sem;
++
+       /*
+        * i_block_group is the number of the block group which contains
+        * this file's inode.  Constant across the lifetime of the inode,
+@@ -2299,6 +2303,72 @@ struct dx_hash_info
+  */
+ #define HASH_NB_ALWAYS                1
++/* assume name-hash is protected by upper layer */
++#define EXT4_HTREE_LOCK_HASH  0
++
++enum ext4_pdo_lk_types {
++#if EXT4_HTREE_LOCK_HASH
++      EXT4_LK_HASH,
++#endif
++      EXT4_LK_DX,             /* index block */
++      EXT4_LK_DE,             /* directory entry block */
++      EXT4_LK_SPIN,           /* spinlock */
++      EXT4_LK_MAX,
++};
++
++/* read-only bit */
++#define EXT4_LB_RO(b)         (1 << (b))
++/* read + write, high bits for writer */
++#define EXT4_LB_RW(b)         ((1 << (b)) | (1 << (EXT4_LK_MAX + (b))))
++
++enum ext4_pdo_lock_bits {
++      /* DX lock bits */
++      EXT4_LB_DX_RO           = EXT4_LB_RO(EXT4_LK_DX),
++      EXT4_LB_DX              = EXT4_LB_RW(EXT4_LK_DX),
++      /* DE lock bits */
++      EXT4_LB_DE_RO           = EXT4_LB_RO(EXT4_LK_DE),
++      EXT4_LB_DE              = EXT4_LB_RW(EXT4_LK_DE),
++      /* DX spinlock bits */
++      EXT4_LB_SPIN_RO         = EXT4_LB_RO(EXT4_LK_SPIN),
++      EXT4_LB_SPIN            = EXT4_LB_RW(EXT4_LK_SPIN),
++      /* accurate searching */
++      EXT4_LB_EXACT           = EXT4_LB_RO(EXT4_LK_MAX << 1),
++};
++
++enum ext4_pdo_lock_opc {
++      /* external */
++      EXT4_HLOCK_READDIR      = (EXT4_LB_DE_RO | EXT4_LB_DX_RO),
++      EXT4_HLOCK_LOOKUP       = (EXT4_LB_DE_RO | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL          = (EXT4_LB_DE | EXT4_LB_SPIN_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_ADD          = (EXT4_LB_DE | EXT4_LB_SPIN_RO),
++
++      /* internal */
++      EXT4_HLOCK_LOOKUP_SAFE  = (EXT4_LB_DE_RO | EXT4_LB_DX_RO |
++                                 EXT4_LB_EXACT),
++      EXT4_HLOCK_DEL_SAFE     = (EXT4_LB_DE | EXT4_LB_DX_RO | EXT4_LB_EXACT),
++      EXT4_HLOCK_SPLIT        = (EXT4_LB_DE | EXT4_LB_DX | EXT4_LB_SPIN),
++};
++
++extern struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits);
++#define ext4_htree_lock_head_free(lhead)      htree_lock_head_free(lhead)
++
++extern struct htree_lock *ext4_htree_lock_alloc(void);
++#define ext4_htree_lock_free(lck)             htree_lock_free(lck)
++
++extern void ext4_htree_lock(struct htree_lock *lck,
++                          struct htree_lock_head *lhead,
++                          struct inode *dir, unsigned flags);
++#define ext4_htree_unlock(lck)                  htree_unlock(lck)
++
++extern struct buffer_head *ext4_find_entry_locked(struct inode *dir,
++                                      const struct qstr *d_name,
++                                      struct ext4_dir_entry_2 **res_dir,
++                                      int *inlined, struct htree_lock *lck);
++extern int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
++                    struct inode *inode, struct htree_lock *lck);
++
+ struct ext4_filename {
+       const struct qstr *usr_fname;
+       struct fscrypt_str disk_name;
+@@ -2666,11 +2736,19 @@ void ext4_insert_dentry(struct inode *in
+                       struct ext4_filename *fname, void *data);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
++      /* Disable it for ldiskfs, because going from a DX directory to
++       * a non-DX directory while it is in use will completely break
++       * the htree-locking.
++       * If we really want to support this operation in the future,
++       * we need to exclusively lock the directory here, which will
++       * increase the complexity of the code. */
++#if 0
+       if (!ext4_has_feature_dir_index(inode->i_sb)) {
+               /* ext4_iget() should have caught this... */
+               WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
+               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+       }
++#endif
+ }
+ static const unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -55,6 +55,7 @@ struct buffer_head *ext4_append(handle_t
+                                       ext4_lblk_t *block)
+ {
+       struct buffer_head *bh;
++      struct ext4_inode_info *ei = EXT4_I(inode);
+       int err;
+       if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+@@ -62,15 +63,22 @@ struct buffer_head *ext4_append(handle_t
+                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+               return ERR_PTR(-ENOSPC);
++      /* with parallel dir operations all appends
++      * have to be serialized -bzzz */
++      down(&ei->i_append_sem);
++
+       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+       bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
+-      if (IS_ERR(bh))
++      if (IS_ERR(bh)) {
++              up(&ei->i_append_sem);
+               return bh;
++      }
+       inode->i_size += inode->i_sb->s_blocksize;
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
++      up(&ei->i_append_sem);
+       if (err) {
+               brelse(bh);
+               ext4_std_error(inode->i_sb, err);
+@@ -271,7 +279,8 @@ static unsigned dx_node_limit(struct ino
+ static struct dx_frame *dx_probe(struct ext4_filename *fname,
+                                struct inode *dir,
+                                struct dx_hash_info *hinfo,
+-                               struct dx_frame *frame);
++                               struct dx_frame *frame,
++                               struct htree_lock *lck);
+ static void dx_release(struct dx_frame *frames);
+ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+                      unsigned blocksize, struct dx_hash_info *hinfo,
+@@ -285,12 +294,13 @@ static void dx_insert_block(struct dx_fr
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash);
++                               __u32 *start_hash, struct htree_lock *lck);
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+               struct ext4_filename *fname,
+-              struct ext4_dir_entry_2 **res_dir);
++              struct ext4_dir_entry_2 **res_dir, struct htree_lock *lck);
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode);
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck);
+ /* checksumming functions */
+ void ext4_initialize_dirent_tail(struct buffer_head *bh,
+@@ -755,6 +765,227 @@ struct stats dx_show_entries(struct dx_h
+ }
+ #endif /* DX_DEBUG */
++/* private data for htree_lock */
++struct ext4_dir_lock_data {
++      unsigned                ld_flags;  /* bits-map for lock types */
++      unsigned                ld_count;  /* # entries of the last DX block */
++      struct dx_entry         ld_at_entry; /* copy of leaf dx_entry */
++      struct dx_entry         *ld_at;    /* position of leaf dx_entry */
++};
++
++#define ext4_htree_lock_data(l)       ((struct ext4_dir_lock_data *)(l)->lk_private)
++#define ext4_find_entry(dir, name, dirent, inline) \
++                      ext4_find_entry_locked(dir, name, dirent, inline, NULL)
++#define ext4_add_entry(handle, dentry, inode) \
++                      ext4_add_entry_locked(handle, dentry, inode, NULL)
++
++/* NB: ext4_lblk_t is 32 bits so we use high bits to identify invalid blk */
++#define EXT4_HTREE_NODE_CHANGED       (0xcafeULL << 32)
++
++static void ext4_htree_event_cb(void *target, void *event)
++{
++      u64 *block = (u64 *)target;
++
++      if (*block == dx_get_block((struct dx_entry *)event))
++              *block = EXT4_HTREE_NODE_CHANGED;
++}
++
++struct htree_lock_head *ext4_htree_lock_head_alloc(unsigned hbits)
++{
++      struct htree_lock_head *lhead;
++
++      lhead = htree_lock_head_alloc(EXT4_LK_MAX, hbits, 0);
++      if (lhead != NULL) {
++              htree_lock_event_attach(lhead, EXT4_LK_SPIN, HTREE_EVENT_WR,
++                                      ext4_htree_event_cb);
++      }
++      return lhead;
++}
++EXPORT_SYMBOL(ext4_htree_lock_head_alloc);
++
++struct htree_lock *ext4_htree_lock_alloc(void)
++{
++      return htree_lock_alloc(EXT4_LK_MAX,
++                              sizeof(struct ext4_dir_lock_data));
++}
++EXPORT_SYMBOL(ext4_htree_lock_alloc);
++
++static htree_lock_mode_t ext4_htree_mode(unsigned flags)
++{
++      switch (flags) {
++      default: /* 0 or unknown flags require EX lock */
++              return HTREE_LOCK_EX;
++      case EXT4_HLOCK_READDIR:
++              return HTREE_LOCK_PR;
++      case EXT4_HLOCK_LOOKUP:
++              return HTREE_LOCK_CR;
++      case EXT4_HLOCK_DEL:
++      case EXT4_HLOCK_ADD:
++              return HTREE_LOCK_CW;
++      }
++}
++
++/* return PR for read-only operations, otherwise return EX */
++static inline htree_lock_mode_t ext4_htree_safe_mode(unsigned flags)
++{
++      int writer = (flags & EXT4_LB_DE) == EXT4_LB_DE;
++
++      /* 0 requires EX lock */
++      return (flags == 0 || writer) ? HTREE_LOCK_EX : HTREE_LOCK_PR;
++}
++
++static int ext4_htree_safe_locked(struct htree_lock *lck)
++{
++      int writer;
++
++      if (lck == NULL || lck->lk_mode == HTREE_LOCK_EX)
++              return 1;
++
++      writer = (ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_DE) ==
++               EXT4_LB_DE;
++      if (writer) /* all readers & writers are excluded? */
++              return lck->lk_mode == HTREE_LOCK_EX;
++
++      /* all writers are excluded? */
++      return lck->lk_mode == HTREE_LOCK_PR ||
++             lck->lk_mode == HTREE_LOCK_PW ||
++             lck->lk_mode == HTREE_LOCK_EX;
++}
++
++/* relock htree_lock with EX mode if it's a change operation, otherwise
++ * relock it with PR mode. It's a no-op if PDO is disabled. */
++static void ext4_htree_safe_relock(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck)) {
++              unsigned flags = ext4_htree_lock_data(lck)->ld_flags;
++
++              htree_change_lock(lck, ext4_htree_safe_mode(flags));
++      }
++}
++
++void ext4_htree_lock(struct htree_lock *lck, struct htree_lock_head *lhead,
++                   struct inode *dir, unsigned flags)
++{
++      htree_lock_mode_t mode = is_dx(dir) ? ext4_htree_mode(flags) :
++                                            ext4_htree_safe_mode(flags);
++
++      ext4_htree_lock_data(lck)->ld_flags = flags;
++      htree_lock(lck, lhead, mode);
++      if (!is_dx(dir))
++              ext4_htree_safe_relock(lck); /* make sure it's safe locked */
++}
++EXPORT_SYMBOL(ext4_htree_lock);
++
++static int ext4_htree_node_lock(struct htree_lock *lck, struct dx_entry *at,
++                              unsigned lmask, int wait, void *ev)
++{
++      u32     key = (at == NULL) ? 0 : dx_get_block(at);
++      u32     mode;
++
++      /* NOOP if htree is well protected or caller doesn't require the lock */
++      if (ext4_htree_safe_locked(lck) ||
++         !(ext4_htree_lock_data(lck)->ld_flags & lmask))
++              return 1;
++
++      mode = (ext4_htree_lock_data(lck)->ld_flags & lmask) == lmask ?
++              HTREE_LOCK_PW : HTREE_LOCK_PR;
++      while (1) {
++              if (htree_node_lock_try(lck, mode, key, ffz(~lmask), wait, ev))
++                      return 1;
++              if (!(lmask & EXT4_LB_SPIN)) /* not a spinlock */
++                      return 0;
++              cpu_relax(); /* spin until granted */
++      }
++}
++
++static int ext4_htree_node_locked(struct htree_lock *lck, unsigned lmask)
++{
++      return ext4_htree_safe_locked(lck) ||
++             htree_node_is_granted(lck, ffz(~lmask));
++}
++
++static void ext4_htree_node_unlock(struct htree_lock *lck,
++                                 unsigned lmask, void *buf)
++{
++      /* NB: it's safe to call multiple times, even if it's not locked */
++      if (!ext4_htree_safe_locked(lck) &&
++           htree_node_is_granted(lck, ffz(~lmask)))
++              htree_node_unlock(lck, ffz(~lmask), buf);
++}
++
++#define ext4_htree_dx_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 1, NULL)
++#define ext4_htree_dx_lock_try(lck, key)      \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DX, 0, NULL)
++#define ext4_htree_dx_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DX, NULL)
++#define ext4_htree_dx_locked(lck)             \
++      ext4_htree_node_locked(lck, EXT4_LB_DX)
++
++static void ext4_htree_dx_need_lock(struct htree_lock *lck)
++{
++      struct ext4_dir_lock_data *ld;
++
++      if (ext4_htree_safe_locked(lck))
++              return;
++
++      ld = ext4_htree_lock_data(lck);
++      switch (ld->ld_flags) {
++      default:
++              return;
++      case EXT4_HLOCK_LOOKUP:
++              ld->ld_flags = EXT4_HLOCK_LOOKUP_SAFE;
++              return;
++      case EXT4_HLOCK_DEL:
++              ld->ld_flags = EXT4_HLOCK_DEL_SAFE;
++              return;
++      case EXT4_HLOCK_ADD:
++              ld->ld_flags = EXT4_HLOCK_SPLIT;
++              return;
++      }
++}
++
++#define ext4_htree_de_lock(lck, key)          \
++      ext4_htree_node_lock(lck, key, EXT4_LB_DE, 1, NULL)
++#define ext4_htree_de_unlock(lck)             \
++      ext4_htree_node_unlock(lck, EXT4_LB_DE, NULL)
++
++#define ext4_htree_spin_lock(lck, key, event) \
++      ext4_htree_node_lock(lck, key, EXT4_LB_SPIN, 0, event)
++#define ext4_htree_spin_unlock(lck)           \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, NULL)
++#define ext4_htree_spin_unlock_listen(lck, p) \
++      ext4_htree_node_unlock(lck, EXT4_LB_SPIN, p)
++
++static void ext4_htree_spin_stop_listen(struct htree_lock *lck)
++{
++      if (!ext4_htree_safe_locked(lck) &&
++          htree_node_is_listening(lck, ffz(~EXT4_LB_SPIN)))
++              htree_node_stop_listen(lck, ffz(~EXT4_LB_SPIN));
++}
++
++enum {
++      DX_HASH_COL_IGNORE,     /* ignore collision while probing frames */
++      DX_HASH_COL_YES,        /* there is collision and it does matter */
++      DX_HASH_COL_NO,         /* there is no collision */
++};
++
++static int dx_probe_hash_collision(struct htree_lock *lck,
++                                 struct dx_entry *entries,
++                                 struct dx_entry *at, u32 hash)
++{
++      if (!(lck && ext4_htree_lock_data(lck)->ld_flags & EXT4_LB_EXACT)) {
++              return DX_HASH_COL_IGNORE; /* don't care about collision */
++
++      } else if (at == entries + dx_get_count(entries) - 1) {
++              return DX_HASH_COL_IGNORE; /* not in any leaf of this DX */
++
++      } else { /* hash collision? */
++              return ((dx_get_hash(at + 1) & ~1) == hash) ?
++                      DX_HASH_COL_YES : DX_HASH_COL_NO;
++      }
++}
++
+ /*
+  * Probe for a directory leaf block to search.
+  *
+@@ -766,10 +997,11 @@ struct stats dx_show_entries(struct dx_h
+  */
+ static struct dx_frame *
+ dx_probe(struct ext4_filename *fname, struct inode *dir,
+-       struct dx_hash_info *hinfo, struct dx_frame *frame_in)
++       struct dx_hash_info *hinfo, struct dx_frame *frame_in,
++       struct htree_lock *lck)
+ {
+       unsigned count, indirect;
+-      struct dx_entry *at, *entries, *p, *q, *m;
++      struct dx_entry *at, *entries, *p, *q, *m, *dx = NULL;
+       struct dx_root_info *info;
+       struct dx_frame *frame = frame_in;
+       struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
+@@ -831,8 +1063,15 @@ dx_probe(struct ext4_filename *fname, st
+       dxtrace(printk("Look up %x", hash));
+       while (1) {
++              if (indirect == 0) { /* the last index level */
++                      /* NB: ext4_htree_dx_lock() could be noop if
++                       * DX-lock flag is not set for current operation */
++                      ext4_htree_dx_lock(lck, dx);
++                      ext4_htree_spin_lock(lck, dx, NULL);
++              }
+               count = dx_get_count(entries);
+-              if (!count || count > dx_get_limit(entries)) {
++              if (count == 0 || count > dx_get_limit(entries)) {
++                      ext4_htree_spin_unlock(lck); /* release spin */
+                       ext4_warning_inode(dir,
+                                          "dx entry: count %u beyond limit %u",
+                                          count, dx_get_limit(entries));
+@@ -871,8 +1110,70 @@ dx_probe(struct ext4_filename *fname, st
+                              dx_get_block(at)));
+               frame->entries = entries;
+               frame->at = at;
+-              if (!indirect--)
++
++              if (indirect == 0) { /* the last index level */
++                      struct ext4_dir_lock_data *ld;
++                      u64 myblock;
++
++                      /* By default we only lock the DE-block; however, we
++                       * will also lock the last-level DX-block if:
++                       * a) there is a hash collision
++                       *    we will set the DX-lock flag (a few lines below)
++                       *    and retry to lock the DX-block
++                       *    see details in dx_probe_hash_collision()
++                       * b) it's a retry from splitting
++                       *    we need to lock the last-level DX-block so nobody
++                       *    else can split any leaf blocks under the same
++                       *    DX-block, see details in ext4_dx_add_entry()
++                       */
++                      if (ext4_htree_dx_locked(lck)) {
++                              /* DX-block is locked, just lock DE-block
++                               * and return */
++                              ext4_htree_spin_unlock(lck);
++                              if (!ext4_htree_safe_locked(lck))
++                                      ext4_htree_de_lock(lck, frame->at);
++                              return frame;
++                      }
++                      /* it's pdirop and no DX lock */
++                      if (dx_probe_hash_collision(lck, entries, at, hash) ==
++                          DX_HASH_COL_YES) {
++                              /* found hash collision, set DX-lock flag
++                               * and retry to obtain DX-lock */
++                              ext4_htree_spin_unlock(lck);
++                              ext4_htree_dx_need_lock(lck);
++                              continue;
++                      }
++                      ld = ext4_htree_lock_data(lck);
++                      /* because we don't lock DX, @at can't be trusted
++                       * after the spinlock is released, so save it here */
++                      ld->ld_at = at;
++                      ld->ld_at_entry = *at;
++                      ld->ld_count = dx_get_count(entries);
++
++                      frame->at = &ld->ld_at_entry;
++                      myblock = dx_get_block(at);
++
++                      /* NB: ordering locking */
++                      ext4_htree_spin_unlock_listen(lck, &myblock);
++                      /* another thread can split this DE-block because:
++                       * a) we don't hold the lock on the DE-block yet
++                       * b) we released the spinlock on the DX-block
++                       * if that happens we can detect it by listening
++                       * for a split event on this DE-block */
++                      ext4_htree_de_lock(lck, frame->at);
++                      ext4_htree_spin_stop_listen(lck);
++
++                      if (myblock == EXT4_HTREE_NODE_CHANGED) {
++                              /* someone split this DE-block before
++                               * we locked it; retry and lock the
++                               * valid DE-block */
++                              ext4_htree_de_unlock(lck);
++                              continue;
++                      }
+                       return frame;
++              }
++              dx = at;
++              indirect--;
+               frame++;
+               frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+               if (IS_ERR(frame->bh)) {
+@@ -941,7 +1242,7 @@ static void dx_release(struct dx_frame *
+ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+-                               __u32 *start_hash)
++                               __u32 *start_hash, struct htree_lock *lck)
+ {
+       struct dx_frame *p;
+       struct buffer_head *bh;
+@@ -956,12 +1257,22 @@ static int ext4_htree_next_block(struct
+        * this loop, num_frames indicates the number of interior
+        * nodes need to be read.
+        */
++      ext4_htree_de_unlock(lck);
+       while (1) {
+-              if (++(p->at) < p->entries + dx_get_count(p->entries))
+-                      break;
++              if (num_frames > 0 || ext4_htree_dx_locked(lck)) {
++                      /* num_frames > 0 :
++                       *   DX block
++                       * ext4_htree_dx_locked:
++                       *   frame->at is a reliable pointer returned by dx_probe;
++                       *   otherwise dx_probe already knew there is no collision */
++                      if (++(p->at) < p->entries + dx_get_count(p->entries))
++                              break;
++              }
+               if (p == frames)
+                       return 0;
+               num_frames++;
++              if (num_frames == 1)
++                      ext4_htree_dx_unlock(lck);
+               p--;
+       }
+@@ -984,6 +1295,13 @@ static int ext4_htree_next_block(struct
+        * block so no check is necessary
+        */
+       while (num_frames--) {
++              if (num_frames == 0) {
++                      /* this is not always necessary; we just don't
++                       * want to detect hash collisions again */
++                      ext4_htree_dx_need_lock(lck);
++                      ext4_htree_dx_lock(lck, p->at);
++              }
++
+               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
+@@ -992,6 +1310,7 @@ static int ext4_htree_next_block(struct
+               p->bh = bh;
+               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+       }
++      ext4_htree_de_lock(lck, p->at);
+       return 1;
+ }
+@@ -1136,10 +1455,10 @@ int ext4_htree_fill_tree(struct file *di
+       }
+       hinfo.hash = start_hash;
+       hinfo.minor_hash = 0;
+-      frame = dx_probe(NULL, dir, &hinfo, frames);
++      /* assume it's PR locked */
++      frame = dx_probe(NULL, dir, &hinfo, frames, NULL);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+-
+       /* Add '.' and '..' from the htree header */
+       if (!start_hash && !start_minor_hash) {
+               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+@@ -1179,7 +1498,7 @@ int ext4_htree_fill_tree(struct file *di
+               count += ret;
+               hashval = ~0;
+               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+-                                          frame, frames, &hashval);
++                                          frame, frames, &hashval, NULL);
+               *next_hash = hashval;
+               if (ret < 0) {
+                       err = ret;
+@@ -1455,7 +1774,7 @@ static int is_dx_internal_node(struct in
+ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+                                            struct ext4_filename *fname,
+                                            struct ext4_dir_entry_2 **res_dir,
+-                                           int *inlined)
++                                           int *inlined, struct htree_lock *lck)
+ {
+       struct super_block *sb;
+       struct buffer_head *bh_use[NAMEI_RA_SIZE];
+@@ -1497,7 +1816,7 @@ static struct buffer_head *__ext4_find_e
+               goto restart;
+       }
+       if (is_dx(dir)) {
+-              ret = ext4_dx_find_entry(dir, fname, res_dir);
++              ret = ext4_dx_find_entry(dir, fname, res_dir, lck);
+               /*
+                * On success, or if the error was file not found,
+                * return.  Otherwise, fall back to doing a search the
+@@ -1507,6 +1826,7 @@ static struct buffer_head *__ext4_find_e
+                       goto cleanup_and_exit;
+               dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
+                              "falling back\n"));
++              ext4_htree_safe_relock(lck);
+               ret = NULL;
+       }
+       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -1597,10 +1917,10 @@ cleanup_and_exit:
+       return ret;
+ }
+-static struct buffer_head *ext4_find_entry(struct inode *dir,
++struct buffer_head *ext4_find_entry_locked(struct inode *dir,
+                                          const struct qstr *d_name,
+                                          struct ext4_dir_entry_2 **res_dir,
+-                                         int *inlined)
++                                         int *inlined, struct htree_lock *lck)
+ {
+       int err;
+       struct ext4_filename fname;
+@@ -1612,12 +1932,14 @@ static struct buffer_head *ext4_find_ent
+       if (err)
+               return ERR_PTR(err);
+-      bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
++      bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lck);
+       ext4_fname_free_filename(&fname);
+       return bh;
+ }
++EXPORT_SYMBOL(ext4_find_entry_locked);
++
+ static struct buffer_head *ext4_lookup_entry(struct inode *dir,
+                                            struct dentry *dentry,
+                                            struct ext4_dir_entry_2 **res_dir)
+@@ -1632,7 +1954,7 @@ static struct buffer_head *ext4_lookup_e
+       if (err)
+               return ERR_PTR(err);
+-      bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
++      bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
+       ext4_fname_free_filename(&fname);
+       return bh;
+@@ -1640,7 +1962,8 @@ static struct buffer_head *ext4_lookup_e
+ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+                       struct ext4_filename *fname,
+-                      struct ext4_dir_entry_2 **res_dir)
++                      struct ext4_dir_entry_2 **res_dir,
++                      struct htree_lock *lck)
+ {
+       struct super_block * sb = dir->i_sb;
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+@@ -1651,7 +1974,7 @@ static struct buffer_head * ext4_dx_find
+ #ifdef CONFIG_FS_ENCRYPTION
+       *res_dir = NULL;
+ #endif
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return (struct buffer_head *) frame;
+       do {
+@@ -1673,7 +1996,7 @@ static struct buffer_head * ext4_dx_find
+               /* Check to see if we should continue to search */
+               retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
+-                                             frames, NULL);
++                                             frames, NULL, lck);
+               if (retval < 0) {
+                       ext4_warning_inode(dir,
+                               "error %d reading directory index block",
+@@ -1853,8 +2176,9 @@ static struct ext4_dir_entry_2* dx_pack_
+  * Returns pointer to de in block into which the new entry will be inserted.
+  */
+ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+-                      struct buffer_head **bh,struct dx_frame *frame,
+-                      struct dx_hash_info *hinfo)
++                      struct buffer_head **bh, struct dx_frame *frames,
++                      struct dx_frame *frame, struct dx_hash_info *hinfo,
++                      struct htree_lock *lck)
+ {
+       unsigned blocksize = dir->i_sb->s_blocksize;
+       unsigned count, continued;
+@@ -1915,8 +2239,14 @@ static struct ext4_dir_entry_2 *do_split
+                                       hash2, split, count-split));
+       /* Fancy dance to stay within two buffers */
+-      de2 = dx_move_dirents(data1, data2, map + split, count - split,
+-                            blocksize);
++      if (hinfo->hash < hash2) {
++              de2 = dx_move_dirents(data1, data2, map + split,
++                                    count - split, blocksize);
++      } else {
++              /* make sure we will add entry to the same block which
++               * we have already locked */
++              de2 = dx_move_dirents(data1, data2, map, split, blocksize);
++      }
+       de = dx_pack_dirents(data1, blocksize);
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
+@@ -1934,12 +2264,21 @@ static struct ext4_dir_entry_2 *do_split
+       dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+                       blocksize, 1));
+-      /* Which block gets the new entry? */
+-      if (hinfo->hash >= hash2) {
+-              swap(*bh, bh2);
+-              de = de2;
++      ext4_htree_spin_lock(lck, frame > frames ? (frame - 1)->at : NULL,
++                           frame->at); /* notify block is being split */
++      if (hinfo->hash < hash2) {
++              dx_insert_block(frame, hash2 + continued, newblock);
++
++      } else {
++              /* switch block number */
++              dx_insert_block(frame, hash2 + continued,
++                              dx_get_block(frame->at));
++              dx_set_block(frame->at, newblock);
++              (frame->at)++;
+       }
+-      dx_insert_block(frame, hash2 + continued, newblock);
++      ext4_htree_spin_unlock(lck);
++      ext4_htree_dx_unlock(lck);
++
+       err = ext4_handle_dirty_dirblock(handle, dir, bh2);
+       if (err)
+               goto journal_error;
+@@ -2209,7 +2548,7 @@ static int make_indexed_dir(handle_t *ha
+       if (retval)
+               goto out_frames;        
+-      de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh2, frames, frame, &fname->hinfo, NULL);
+       if (IS_ERR(de)) {
+               retval = PTR_ERR(de);
+               goto out_frames;
+@@ -2319,8 +2658,8 @@ out:
+  * may not sleep between calling this and putting something into
+  * the entry, as someone else might have used it while you slept.
+  */
+-static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+-                        struct inode *inode)
++int ext4_add_entry_locked(handle_t *handle, struct dentry *dentry,
++                        struct inode *inode, struct htree_lock *lck)
+ {
+       struct inode *dir = d_inode(dentry->d_parent);
+       struct buffer_head *bh = NULL;
+@@ -2370,9 +2709,10 @@ static int ext4_add_entry(handle_t *hand
+               if (dentry->d_name.len == 2 &&
+                    memcmp(dentry->d_name.name, "..", 2) == 0)
+                        return ext4_update_dotdot(handle, dentry, inode);
+-              retval = ext4_dx_add_entry(handle, &fname, dir, inode);
++              retval = ext4_dx_add_entry(handle, &fname, dir, inode, lck);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       goto out;
++              ext4_htree_safe_relock(lck);
+               /* Can we just ignore htree data? */
+               if (ext4_has_metadata_csum(sb)) {
+                       EXT4_ERROR_INODE(dir,
+@@ -2435,12 +2775,14 @@ out:
+               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+       return retval;
+ }
++EXPORT_SYMBOL(ext4_add_entry_locked);
+ /*
+  * Returns 0 for success, or a negative error value
+  */
+ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+-                           struct inode *dir, struct inode *inode)
++                           struct inode *dir, struct inode *inode,
++                           struct htree_lock *lck)
+ {
+       struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
+       struct dx_entry *entries, *at;
+@@ -2452,7 +2794,7 @@ static int ext4_dx_add_entry(handle_t *h
+ again:
+       restart = 0;
+-      frame = dx_probe(fname, dir, NULL, frames);
++      frame = dx_probe(fname, dir, NULL, frames, lck);
+       if (IS_ERR(frame))
+               return PTR_ERR(frame);
+       entries = frame->entries;
+@@ -2487,6 +2829,12 @@ again:
+               struct dx_node *node2;
+               struct buffer_head *bh2;
++              if (!ext4_htree_safe_locked(lck)) { /* retry with EX lock */
++                      ext4_htree_safe_relock(lck);
++                      restart = 1;
++                      goto cleanup;
++              }
++
+               while (frame > frames) {
+                       if (dx_get_count((frame - 1)->entries) <
+                           dx_get_limit((frame - 1)->entries)) {
+@@ -2589,8 +2937,32 @@ again:
+                       restart = 1;
+                       goto journal_error;
+               }
++      } else if (!ext4_htree_dx_locked(lck)) {
++              struct ext4_dir_lock_data *ld = ext4_htree_lock_data(lck);
++
++              /* not well protected, require DX lock */
++              ext4_htree_dx_need_lock(lck);
++              at = frame > frames ? (frame - 1)->at : NULL;
++
++              /* NB: no risk of deadlock because it's just a try.
++               *
++               * NB: we check ld_count twice, the first time before
++               * taking the DX lock, the second time while holding it.
++               *
++               * NB: we never free directory blocks so far, which
++               * means the value returned by dx_get_count() should equal
++               * ld->ld_count if nobody split any DE-block under @at,
++               * and ld->ld_at still points to a valid dx_entry. */
++              if ((ld->ld_count != dx_get_count(entries)) ||
++                  !ext4_htree_dx_lock_try(lck, at) ||
++                  (ld->ld_count != dx_get_count(entries))) {
++                      restart = 1;
++                      goto cleanup;
++              }
++              /* OK, I've got DX lock and nothing changed */
++              frame->at = ld->ld_at;
+       }
+-      de = do_split(handle, dir, &bh, frame, &fname->hinfo);
++      de = do_split(handle, dir, &bh, frames, frame, &fname->hinfo, lck);
+       if (IS_ERR(de)) {
+               err = PTR_ERR(de);
+               goto cleanup;
+@@ -2601,6 +2973,8 @@ again:
+ journal_error:
+       ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
+ cleanup:
++      ext4_htree_dx_unlock(lck);
++      ext4_htree_de_unlock(lck);
+       brelse(bh);
+       dx_release(frames);
+       /* @restart is true means htree-path has been changed, we need to
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1122,6 +1122,7 @@ static struct inode *ext4_alloc_inode(st
+       inode_set_iversion(&ei->vfs_inode, 1);
+       spin_lock_init(&ei->i_raw_lock);
++      sema_init(&ei->i_append_sem, 1);
+       INIT_LIST_HEAD(&ei->i_prealloc_list);
+       spin_lock_init(&ei->i_prealloc_lock);
+       ext4_es_init_tree(&ei->i_es_tree);
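
The pdirop scheme added above boils down to: lock only the target DE-block in the common case, upgrade to the last-level DX-block lock when a hash collision or a split retry demands it, and catch concurrent leaf splits through the listen/notify block number. Below is a minimal standalone model of that control flow only; probe_state, de_lock_and_listen() and HTREE_NODE_CHANGED are illustrative stand-ins, not the htree_lock API.

#include <stdbool.h>
#include <stdio.h>

#define HTREE_NODE_CHANGED ((unsigned long long)-1)	/* stand-in sentinel */

struct probe_state {
	bool dx_locked;		/* we already hold the last-level DX lock */
	bool collision;		/* the next leaf may share our hash */
	bool split_raced;	/* a concurrent split hits our DE-block */
};

/* models ext4_htree_spin_unlock_listen() + ext4_htree_de_lock():
 * a racing split overwrites the listened block number */
static unsigned long long de_lock_and_listen(struct probe_state *st,
					     unsigned long long block)
{
	return st->split_raced ? HTREE_NODE_CHANGED : block;
}

static int probe_leaf(struct probe_state *st, unsigned long long block)
{
	for (;;) {
		if (st->dx_locked) {
			/* DX held: nobody can split under us, take DE lock */
			printf("DX held: lock DE-block %llu and return\n",
			       block);
			return 0;
		}
		if (st->collision) {
			/* matches ext4_htree_dx_need_lock() + continue */
			printf("hash collision: retry with DX lock\n");
			st->dx_locked = true;
			continue;
		}
		if (de_lock_and_listen(st, block) == HTREE_NODE_CHANGED) {
			/* DE-block split before we locked it: retry */
			printf("DE-block split under us: retry\n");
			st->split_raced = false;
			continue;
		}
		printf("locked DE-block %llu without a DX lock\n", block);
		return 0;
	}
}

int main(void)
{
	struct probe_state st = { .split_raced = true };

	return probe_leaf(&st, 42);
}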
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-prealloc.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-prealloc.patch
new file mode 100644 (file)
index 0000000..6aa6261
--- /dev/null
@@ -0,0 +1,385 @@
+---
+ fs/ext4/ext4.h    |    7 +
+ fs/ext4/inode.c   |    3 
+ fs/ext4/mballoc.c |  215 +++++++++++++++++++++++++++++++++++++++++-------------
+ fs/ext4/sysfs.c   |    8 +-
+ 4 files changed, 180 insertions(+), 53 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1241,6 +1241,8 @@ extern void ext4_set_bits(void *bm, int
+ /* Metadata checksum algorithm codes */
+ #define EXT4_CRC32C_CHKSUM            1
++#define EXT4_MAX_PREALLOC_TABLE       64
++
+ /*
+  * Structure of the super block
+  */
+@@ -1497,11 +1499,13 @@ struct ext4_sb_info {
+       /* tunables */
+       unsigned long s_stripe;
+-      unsigned int s_mb_stream_request;
++      unsigned long s_mb_small_req;
++      unsigned long s_mb_large_req;
+       unsigned int s_mb_max_to_scan;
+       unsigned int s_mb_min_to_scan;
+       unsigned int s_mb_stats;
+       unsigned int s_mb_order2_reqs;
++      unsigned long *s_mb_prealloc_table;
+       unsigned int s_mb_group_prealloc;
+       unsigned int s_max_dir_size_kb;
+       /* where last allocation was done - for stream allocation */
+@@ -2645,6 +2649,7 @@ extern int ext4_init_inode_table(struct
+ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
+ /* mballoc.c */
++extern const struct proc_ops ext4_seq_prealloc_table_fops;
+ extern const struct seq_operations ext4_mb_seq_groups_ops;
+ extern long ext4_mb_stats;
+ extern long ext4_mb_max_to_scan;
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2698,6 +2698,9 @@ static int ext4_writepages(struct addres
+                                               PAGE_SIZE >> inode->i_blkbits);
+       }
++      if (wbc->nr_to_write < sbi->s_mb_small_req)
++              wbc->nr_to_write = sbi->s_mb_small_req;
++
+       if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+               range_whole = 1;
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2455,6 +2455,99 @@ const struct seq_operations ext4_mb_seq_
+       .show   = ext4_mb_seq_groups_show,
+ };
++static int ext4_mb_check_and_update_prealloc(struct ext4_sb_info *sbi,
++                                               char *str, size_t cnt,
++                                               int update)
++{
++      unsigned long value;
++      unsigned long prev = 0;
++      char *cur;
++      char *next;
++      char *end;
++      int num = 0;
++
++      cur = str;
++      end = str + cnt;
++      while (cur < end) {
++              while ((cur < end) && (*cur == ' ')) cur++;
++              value = simple_strtol(cur, &next, 0);
++              if (value == 0)
++                      break;
++              if (cur == next)
++                      return -EINVAL;
++
++              cur = next;
++
++              if (value > (sbi->s_blocks_per_group - 1 - 1 - sbi->s_itb_per_group))
++                      return -EINVAL;
++
++              /* values must be given in increasing order */
++              if (value <= prev)
++                      return -EINVAL;
++
++              if (update)
++                      sbi->s_mb_prealloc_table[num] = value;
++
++              prev = value;
++              num++;
++      }
++
++      if (num > EXT4_MAX_PREALLOC_TABLE - 1)
++              return -EOVERFLOW;
++
++      if (update)
++              sbi->s_mb_prealloc_table[num] = 0;
++
++      return 0;
++}
++
++static ssize_t ext4_mb_prealloc_table_proc_write(struct file *file,
++                                           const char __user *buf,
++                                           size_t cnt, loff_t *pos)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
++      char str[128];
++      int rc;
++
++      if (cnt >= sizeof(str))
++              return -EINVAL;
++      if (copy_from_user(str, buf, cnt))
++              return -EFAULT;
++
++      rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 0);
++      if (rc)
++              return rc;
++
++      rc = ext4_mb_check_and_update_prealloc(sbi, str, cnt, 1);
++      return rc ? rc : cnt;
++}
++
++static int mb_prealloc_table_seq_show(struct seq_file *m, void *v)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(m->private);
++      int i;
++
++      for (i = 0; i < EXT4_MAX_PREALLOC_TABLE &&
++                      sbi->s_mb_prealloc_table[i] != 0; i++)
++              seq_printf(m, "%ld ", sbi->s_mb_prealloc_table[i]);
++      seq_printf(m, "\n");
++
++      return 0;
++}
++
++static int mb_prealloc_table_seq_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, mb_prealloc_table_seq_show, PDE_DATA(inode));
++}
++
++const struct proc_ops ext4_seq_prealloc_table_fops = {
++      .proc_open      = mb_prealloc_table_seq_open,
++      .proc_read      = seq_read,
++      .proc_lseek     = seq_lseek,
++      .proc_release   = single_release,
++      .proc_write     = ext4_mb_prealloc_table_proc_write,
++};
++
+ static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
+ {
+       int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+@@ -2685,7 +2778,7 @@ static int ext4_groupinfo_create_slab(si
+ int ext4_mb_init(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      unsigned i, j;
++      unsigned i, j, k, l;
+       unsigned offset, offset_incr;
+       unsigned max;
+       int ret;
+@@ -2734,7 +2827,6 @@ int ext4_mb_init(struct super_block *sb)
+       sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
+       sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+       sbi->s_mb_stats = MB_DEFAULT_STATS;
+-      sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
+       sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+       /*
+        * The default group preallocation is 512, which for 4k block
+@@ -2758,9 +2850,29 @@ int ext4_mb_init(struct super_block *sb)
+        * RAID stripe size so that preallocations don't fragment
+        * the stripes.
+        */
+-      if (sbi->s_stripe > 1) {
+-              sbi->s_mb_group_prealloc = roundup(
+-                      sbi->s_mb_group_prealloc, sbi->s_stripe);
++
++      /* Allocate table once */
++      sbi->s_mb_prealloc_table = kzalloc(
++              EXT4_MAX_PREALLOC_TABLE * sizeof(unsigned long), GFP_NOFS);
++      if (sbi->s_mb_prealloc_table == NULL) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      if (sbi->s_stripe == 0) {
++              for (k = 0, l = 4; k <= 9; ++k, l *= 2)
++                      sbi->s_mb_prealloc_table[k] = l;
++
++              sbi->s_mb_small_req = 256;
++              sbi->s_mb_large_req = 1024;
++              sbi->s_mb_group_prealloc = 512;
++      } else {
++              for (k = 0, l = sbi->s_stripe; k <= 2; ++k, l *= 2)
++                      sbi->s_mb_prealloc_table[k] = l;
++
++              sbi->s_mb_small_req = sbi->s_stripe;
++              sbi->s_mb_large_req = sbi->s_stripe * 8;
++              sbi->s_mb_group_prealloc = sbi->s_stripe * 4;
+       }
+       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
+@@ -2788,6 +2900,7 @@ out_free_locality_groups:
+       free_percpu(sbi->s_locality_groups);
+       sbi->s_locality_groups = NULL;
+ out:
++      kfree(sbi->s_mb_prealloc_table);
+       kfree(sbi->s_mb_offsets);
+       sbi->s_mb_offsets = NULL;
+       kfree(sbi->s_mb_maxs);
+@@ -3057,7 +3170,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
+       int err, len;
+       BUG_ON(ac->ac_status != AC_STATUS_FOUND);
+-      BUG_ON(ac->ac_b_ex.fe_len <= 0);
+       sb = ac->ac_sb;
+       sbi = EXT4_SB(sb);
+@@ -3187,13 +3299,14 @@ ext4_mb_normalize_request(struct ext4_al
+                               struct ext4_allocation_request *ar)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+-      int bsbits, max;
++      int bsbits, i, wind;
+       ext4_lblk_t end;
+-      loff_t size, start_off;
++      loff_t size;
+       loff_t orig_size __maybe_unused;
+       ext4_lblk_t start;
+       struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+       struct ext4_prealloc_space *pa;
++      unsigned long value, last_non_zero;
+       /* do normalize only data requests, metadata requests
+          do not need preallocation */
+@@ -3222,51 +3335,46 @@ ext4_mb_normalize_request(struct ext4_al
+       size = size << bsbits;
+       if (size < i_size_read(ac->ac_inode))
+               size = i_size_read(ac->ac_inode);
+-      orig_size = size;
++      size = (size + ac->ac_sb->s_blocksize - 1) >> bsbits;
++
++      start = wind = 0;
++      value = last_non_zero = 0;
+-      /* max size of free chunks */
+-      max = 2 << bsbits;
++      /* let's choose preallocation window depending on file size */
++      for (i = 0; i < EXT4_MAX_PREALLOC_TABLE; i++) {
++              value = sbi->s_mb_prealloc_table[i];
++              if (value == 0)
++                      break;
++              else
++                      last_non_zero = value;
+-#define NRL_CHECK_SIZE(req, size, max, chunk_size)    \
+-              (req <= (size) || max <= (chunk_size))
++              if (size <= value) {
++                      wind = value;
++                      break;
++              }
++      }
+-      /* first, try to predict filesize */
+-      /* XXX: should this table be tunable? */
+-      start_off = 0;
+-      if (size <= 16 * 1024) {
+-              size = 16 * 1024;
+-      } else if (size <= 32 * 1024) {
+-              size = 32 * 1024;
+-      } else if (size <= 64 * 1024) {
+-              size = 64 * 1024;
+-      } else if (size <= 128 * 1024) {
+-              size = 128 * 1024;
+-      } else if (size <= 256 * 1024) {
+-              size = 256 * 1024;
+-      } else if (size <= 512 * 1024) {
+-              size = 512 * 1024;
+-      } else if (size <= 1024 * 1024) {
+-              size = 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                              (21 - bsbits)) << 21;
+-              size = 2 * 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                                      (22 - bsbits)) << 22;
+-              size = 4 * 1024 * 1024;
+-      } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
+-                                      (8<<20)>>bsbits, max, 8 * 1024)) {
+-              start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
+-                                                      (23 - bsbits)) << 23;
+-              size = 8 * 1024 * 1024;
++      if (wind == 0) {
++              if (last_non_zero != 0) {
++                      __u64 tstart, tend;
++                      /* file is quite large, so we preallocate with
++                       * the biggest configured window with regard to
++                       * the logical offset */
++                      wind = last_non_zero;
++                      tstart = ac->ac_o_ex.fe_logical;
++                      do_div(tstart, wind);
++                      start = tstart * wind;
++                      tend = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len - 1;
++                      do_div(tend, wind);
++                      tend = tend * wind + wind;
++                      size = tend - start;
++              }
+       } else {
+-              start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+-              size      = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+-                                            ac->ac_o_ex.fe_len) << bsbits;
++              size = wind;
+       }
+-      size = size >> bsbits;
+-      start = start_off >> bsbits;
++
++
++      orig_size = size;
+       /* don't cover already allocated blocks in selected range */
+       if (ar->pleft && start <= ar->lleft) {
+@@ -3348,7 +3456,6 @@ ext4_mb_normalize_request(struct ext4_al
+                        (unsigned long) ac->ac_o_ex.fe_logical);
+               BUG();
+       }
+-      BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
+       /* now prepare goal request */
+@@ -4341,11 +4448,19 @@ static void ext4_mb_group_or_file(struct
+       /* don't use group allocation for large files */
+       size = max(size, isize);
+-      if (size > sbi->s_mb_stream_request) {
++      if ((ac->ac_o_ex.fe_len >= sbi->s_mb_small_req) ||
++          (size >= sbi->s_mb_large_req)) {
+               ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+               return;
+       }
++      /*
++       * request is so large that we don't care about
++       * streaming - it outweighs any possible seek
++       */
++      if (ac->ac_o_ex.fe_len >= sbi->s_mb_large_req)
++              return;
++
+       BUG_ON(ac->ac_lg != NULL);
+       /*
+        * locality group prealloc space are per cpu. The reason for having
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -213,7 +213,8 @@ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+-EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
++EXT4_RW_ATTR_SBI_UI(mb_small_req, s_mb_small_req);
++EXT4_RW_ATTR_SBI_UI(mb_large_req, s_mb_large_req);
+ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+ EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
+@@ -255,7 +256,8 @@ static struct attribute *ext4_attrs[] =
+       ATTR_LIST(mb_max_to_scan),
+       ATTR_LIST(mb_min_to_scan),
+       ATTR_LIST(mb_order2_req),
+-      ATTR_LIST(mb_stream_req),
++      ATTR_LIST(mb_small_req),
++      ATTR_LIST(mb_large_req),
+       ATTR_LIST(mb_group_prealloc),
+       ATTR_LIST(max_writeback_mb_bump),
+       ATTR_LIST(extent_max_zeroout_kb),
+@@ -510,6 +512,8 @@ int ext4_register_sysfs(struct super_blo
+                               sb);
+               proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
+                               &ext4_mb_seq_groups_ops, sb);
++              proc_create_data("prealloc_table", S_IRUGO, sbi->s_proc,
++                              &ext4_seq_prealloc_table_fops, sb);
+       }
+       return 0;
+ }
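
The rewritten ext4_mb_normalize_request() above no longer predicts file size with a hard-coded ladder; it picks the smallest prealloc-table entry that covers the request, and for files larger than every entry it reuses the biggest window aligned to the logical offset. A minimal userspace sketch of the table lookup, using the default non-striped table that ext4_mb_init() builds (4..2048 blocks):

#include <stdio.h>

/* smallest table entry covering size_blocks, else the largest entry */
static unsigned long pick_window(const unsigned long *table,
				 unsigned long size_blocks)
{
	unsigned long last = 0;
	int i;

	for (i = 0; table[i] != 0; i++) {
		last = table[i];
		if (size_blocks <= table[i])
			return table[i];
	}
	/* file larger than every entry: the kernel additionally aligns
	 * the request start/end to this window before using it */
	return last;
}

int main(void)
{
	const unsigned long table[] = { 4, 8, 16, 32, 64, 128, 256,
					512, 1024, 2048, 0 };

	printf("100 blocks -> %lu-block window\n", pick_window(table, 100));
	printf("5000 blocks -> %lu-block window\n", pick_window(table, 5000));
	return 0;
}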
diff --git a/ldiskfs/kernel_patches/patches/linux-5.8/ext4-simple-blockalloc.patch b/ldiskfs/kernel_patches/patches/linux-5.8/ext4-simple-blockalloc.patch
new file mode 100644 (file)
index 0000000..e266034
--- /dev/null
@@ -0,0 +1,343 @@
+---
+ fs/ext4/ext4.h    |    7 ++
+ fs/ext4/mballoc.c |  133 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/ext4/mballoc.h |    3 +
+ fs/ext4/sysfs.c   |   52 +++++++++++++++++++++
+ 4 files changed, 195 insertions(+)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1518,6 +1518,9 @@ struct ext4_sb_info {
+       unsigned int s_mb_min_to_scan;
+       unsigned int s_mb_stats;
+       unsigned int s_mb_order2_reqs;
++      ext4_fsblk_t s_mb_c1_blocks;
++      ext4_fsblk_t s_mb_c2_blocks;
++      ext4_fsblk_t s_mb_c3_blocks;
+       unsigned long *s_mb_prealloc_table;
+       unsigned int s_mb_group_prealloc;
+       unsigned int s_max_dir_size_kb;
+@@ -1534,6 +1537,9 @@ struct ext4_sb_info {
+       atomic_t s_bal_goals;   /* goal hits */
+       atomic_t s_bal_breaks;  /* too long searches */
+       atomic_t s_bal_2orders; /* 2^order hits */
++      /* cX loop didn't find blocks */
++      atomic64_t s_bal_cX_failed[3];
++      atomic64_t s_bal_cX_skipped[3];
+       spinlock_t s_bal_lock;
+       unsigned long s_mb_buddies_generated;
+       unsigned long long s_mb_generation_time;
+@@ -2813,6 +2819,7 @@ ext4_read_inode_bitmap(struct super_bloc
+ /* mballoc.c */
+ extern const struct proc_ops ext4_seq_prealloc_table_fops;
+ extern const struct seq_operations ext4_mb_seq_groups_ops;
++extern const struct proc_ops ext4_mb_seq_alloc_fops;
+ extern const struct proc_ops ext4_seq_mb_last_group_fops;
+ extern int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v);
+ extern long ext4_mb_stats;
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2218,6 +2218,20 @@ out:
+       return ret;
+ }
++static u64 available_blocks_count(struct ext4_sb_info *sbi)
++{
++      ext4_fsblk_t resv_blocks;
++      u64 bfree;
++      struct ext4_super_block *es = sbi->s_es;
++
++      resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
++      bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
++               percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
++
++      bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
++      return bfree - (ext4_r_blocks_count(es) + resv_blocks);
++}
++
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+@@ -2227,6 +2241,7 @@ ext4_mb_regular_allocator(struct ext4_al
+       struct ext4_sb_info *sbi;
+       struct super_block *sb;
+       struct ext4_buddy e4b;
++      ext4_fsblk_t avail_blocks;
+       sb = ac->ac_sb;
+       sbi = EXT4_SB(sb);
+@@ -2279,6 +2294,21 @@ ext4_mb_regular_allocator(struct ext4_al
+       /* Let's just scan groups to find more-less suitable blocks */
+       cr = ac->ac_2order ? 0 : 1;
++
++      /* Choose the starting pass based on how full the disk is */
++      avail_blocks = available_blocks_count(sbi);
++
++      if (avail_blocks < sbi->s_mb_c3_blocks) {
++              cr = 3;
++              atomic64_inc(&sbi->s_bal_cX_skipped[2]);
++      } else if (avail_blocks < sbi->s_mb_c2_blocks) {
++              cr = 2;
++              atomic64_inc(&sbi->s_bal_cX_skipped[1]);
++      } else if (avail_blocks < sbi->s_mb_c1_blocks) {
++              cr = 1;
++              atomic64_inc(&sbi->s_bal_cX_skipped[0]);
++      }
++
+       /*
+        * cr == 0 try to get exact allocation,
+        * cr == 3  try to get anything
+@@ -2342,6 +2372,9 @@ repeat:
+                       if (ac->ac_status != AC_STATUS_CONTINUE)
+                               break;
+               }
++              /* Processed all groups and haven't found blocks */
++              if (i == ngroups)
++                      atomic64_inc(&sbi->s_bal_cX_failed[cr]);
+       }
+       if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
+@@ -2624,6 +2657,92 @@ const struct proc_ops ext4_seq_mb_last_g
+       .proc_write     = ext4_mb_last_group_write,
+ };
++static int mb_seq_alloc_show(struct seq_file *seq, void *v)
++{
++      struct super_block *sb = seq->private;
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++
++      seq_printf(seq, "mballoc:\n");
++      seq_printf(seq, "\tblocks: %u\n", atomic_read(&sbi->s_bal_allocated));
++      seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
++      seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
++
++      seq_printf(seq, "\textents_scanned: %u\n",
++                 atomic_read(&sbi->s_bal_ex_scanned));
++      seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
++      seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
++      seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
++      seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
++
++      seq_printf(seq, "\tuseless_c1_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_failed[0]));
++      seq_printf(seq, "\tuseless_c2_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_failed[1]));
++      seq_printf(seq, "\tuseless_c3_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_failed[2]));
++      seq_printf(seq, "\tskipped_c1_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_skipped[0]));
++      seq_printf(seq, "\tskipped_c2_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_skipped[1]));
++      seq_printf(seq, "\tskipped_c3_loops: %llu\n",
++                 atomic64_read(&sbi->s_bal_cX_skipped[2]));
++      seq_printf(seq, "\tbuddies_generated: %lu\n",
++                 sbi->s_mb_buddies_generated);
++      seq_printf(seq, "\tbuddies_time_used: %llu\n", sbi->s_mb_generation_time);
++      seq_printf(seq, "\tpreallocated: %u\n",
++                 atomic_read(&sbi->s_mb_preallocated));
++      seq_printf(seq, "\tdiscarded: %u\n",
++                 atomic_read(&sbi->s_mb_discarded));
++      return 0;
++}
++
++static ssize_t mb_seq_alloc_write(struct file *file,
++                            const char __user *buf,
++                            size_t cnt, loff_t *pos)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(PDE_DATA(file_inode(file)));
++
++      atomic_set(&sbi->s_bal_allocated, 0),
++      atomic_set(&sbi->s_bal_reqs, 0),
++      atomic_set(&sbi->s_bal_success, 0);
++
++      atomic_set(&sbi->s_bal_ex_scanned, 0),
++      atomic_set(&sbi->s_bal_goals, 0),
++      atomic_set(&sbi->s_bal_2orders, 0),
++      atomic_set(&sbi->s_bal_breaks, 0),
++      atomic_set(&sbi->s_mb_lost_chunks, 0);
++
++      atomic64_set(&sbi->s_bal_cX_failed[0], 0),
++      atomic64_set(&sbi->s_bal_cX_failed[1], 0),
++      atomic64_set(&sbi->s_bal_cX_failed[2], 0);
++
++      atomic64_set(&sbi->s_bal_cX_skipped[0], 0),
++      atomic64_set(&sbi->s_bal_cX_skipped[1], 0),
++      atomic64_set(&sbi->s_bal_cX_skipped[2], 0);
++
++
++      sbi->s_mb_buddies_generated = 0;
++      sbi->s_mb_generation_time = 0;
++
++      atomic_set(&sbi->s_mb_preallocated, 0),
++      atomic_set(&sbi->s_mb_discarded, 0);
++
++      return cnt;
++}
++
++static int mb_seq_alloc_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, mb_seq_alloc_show, PDE_DATA(inode));
++}
++
++const struct proc_ops ext4_mb_seq_alloc_fops = {
++      .proc_open      = mb_seq_alloc_open,
++      .proc_read      = seq_read,
++      .proc_lseek     = seq_lseek,
++      .proc_release   = single_release,
++      .proc_write     = mb_seq_alloc_write,
++};
++
+ int ext4_mb_seq_last_start_seq_show(struct seq_file *m, void *v)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(m->private);
+@@ -2850,6 +2969,7 @@ static int ext4_groupinfo_create_slab(si
+       return 0;
+ }
++#define THRESHOLD_BLOCKS(ts) (ext4_blocks_count(sbi->s_es) / 100 * ts)
+ int ext4_mb_init(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -2903,6 +3023,9 @@ int ext4_mb_init(struct super_block *sb)
+       sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
+       sbi->s_mb_stats = MB_DEFAULT_STATS;
+       sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
++      sbi->s_mb_c1_blocks = THRESHOLD_BLOCKS(MB_DEFAULT_C1_THRESHOLD);
++      sbi->s_mb_c2_blocks = THRESHOLD_BLOCKS(MB_DEFAULT_C2_THRESHOLD);
++      sbi->s_mb_c3_blocks = THRESHOLD_BLOCKS(MB_DEFAULT_C3_THRESHOLD);
+       /*
+        * The default group preallocation is 512, which for 4k block
+        * sizes translates to 2 megabytes.  However for bigalloc file
+@@ -3042,6 +3165,16 @@ int ext4_mb_release(struct super_block *
+                               atomic_read(&sbi->s_bal_reqs),
+                               atomic_read(&sbi->s_bal_success));
+               ext4_msg(sb, KERN_INFO,
++                      "mballoc: (%llu, %llu, %llu) useless c(0,1,2) loops",
++                              atomic64_read(&sbi->s_bal_cX_failed[0]),
++                              atomic64_read(&sbi->s_bal_cX_failed[1]),
++                              atomic64_read(&sbi->s_bal_cX_failed[2]));
++              ext4_msg(sb, KERN_INFO,
++                      "mballoc: (%llu, %llu, %llu) skipped c(0,1,2) loops",
++                              atomic64_read(&sbi->s_bal_cX_skipped[0]),
++                              atomic64_read(&sbi->s_bal_cX_skipped[1]),
++                              atomic64_read(&sbi->s_bal_cX_skipped[2]));
++              ext4_msg(sb, KERN_INFO,
+                     "mballoc: %u extents scanned, %u goal hits, "
+                               "%u 2^N hits, %u breaks, %u lost",
+                               atomic_read(&sbi->s_bal_ex_scanned),
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -68,6 +68,9 @@
+  * for which requests use 2^N search using buddies
+  */
+ #define MB_DEFAULT_ORDER2_REQS                8
++#define MB_DEFAULT_C1_THRESHOLD               25
++#define MB_DEFAULT_C2_THRESHOLD               15
++#define MB_DEFAULT_C3_THRESHOLD               5
+ /*
+  * default group prealloc size 512 blocks
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -21,6 +21,9 @@
+ typedef enum {
+       attr_noop,
+       attr_delayed_allocation_blocks,
++      attr_mb_c1_threshold,
++      attr_mb_c2_threshold,
++      attr_mb_c3_threshold,
+       attr_session_write_kbytes,
+       attr_lifetime_write_kbytes,
+       attr_reserved_clusters,
+@@ -140,6 +143,32 @@ static ssize_t journal_task_show(struct
+                       task_pid_vnr(sbi->s_journal->j_task));
+ }
++#define THRESHOLD_PERCENT(ts) (ts * 100 / ext4_blocks_count(sbi->s_es))
++
++static int save_threshold_percent(struct ext4_sb_info *sbi, const char *buf,
++                                ext4_fsblk_t *blocks)
++{
++      unsigned long long val;
++
++      int ret;
++
++      ret = kstrtoull(skip_spaces(buf), 0, &val);
++      if (ret || val > 100)
++              return -EINVAL;
++
++      *blocks = val * ext4_blocks_count(sbi->s_es) / 100;
++      return 0;
++}
++
++static ssize_t mb_threshold_store(struct ext4_sb_info *sbi,
++                                const char *buf, size_t count,
++                                ext4_fsblk_t *blocks)
++{
++      int ret = save_threshold_percent(sbi, buf, blocks);
++
++      return ret ?: count;
++}
++
+ #define EXT4_ATTR(_name,_mode,_id)                                    \
+ static struct ext4_attr ext4_attr_##_name = {                         \
+       .attr = {.name = __stringify(_name), .mode = _mode },           \
+@@ -205,6 +234,9 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks
+ EXT4_ATTR_FUNC(session_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(reserved_clusters, 0644);
++EXT4_ATTR_FUNC(mb_c1_threshold, 0644);
++EXT4_ATTR_FUNC(mb_c2_threshold, 0644);
++EXT4_ATTR_FUNC(mb_c3_threshold, 0644);
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+                ext4_sb_info, s_inode_readahead_blks);
+@@ -253,6 +285,9 @@ static struct attribute *ext4_attrs[] =
+       ATTR_LIST(session_write_kbytes),
+       ATTR_LIST(lifetime_write_kbytes),
+       ATTR_LIST(reserved_clusters),
++      ATTR_LIST(mb_c1_threshold),
++      ATTR_LIST(mb_c2_threshold),
++      ATTR_LIST(mb_c3_threshold),
+       ATTR_LIST(inode_readahead_blks),
+       ATTR_LIST(inode_goal),
+       ATTR_LIST(max_dir_size),
+@@ -365,6 +400,15 @@ static ssize_t ext4_attr_show(struct kob
+               return snprintf(buf, PAGE_SIZE, "%llu\n",
+                               (s64) EXT4_C2B(sbi,
+                      percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
++      case attr_mb_c1_threshold:
++              return scnprintf(buf, PAGE_SIZE, "%llu\n",
++                               THRESHOLD_PERCENT(sbi->s_mb_c1_blocks));
++      case attr_mb_c2_threshold:
++              return scnprintf(buf, PAGE_SIZE, "%llu\n",
++                               THRESHOLD_PERCENT(sbi->s_mb_c2_blocks));
++      case attr_mb_c3_threshold:
++              return scnprintf(buf, PAGE_SIZE, "%llu\n",
++                               THRESHOLD_PERCENT(sbi->s_mb_c3_blocks));
+       case attr_session_write_kbytes:
+               return session_write_kbytes_show(sbi, buf);
+       case attr_lifetime_write_kbytes:
+@@ -466,6 +510,12 @@ static ssize_t ext4_attr_store(struct ko
+               return inode_readahead_blks_store(sbi, buf, len);
+       case attr_trigger_test_error:
+               return trigger_test_error(sbi, buf, len);
++      case attr_mb_c1_threshold:
++              return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c1_blocks);
++      case attr_mb_c2_threshold:
++              return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c2_blocks);
++      case attr_mb_c3_threshold:
++              return mb_threshold_store(sbi, buf, len, &sbi->s_mb_c3_blocks);
+       }
+       return 0;
+ }
+@@ -528,6 +578,8 @@ int ext4_register_sysfs(struct super_blo
+                               &ext4_seq_mb_last_group_fops, sb);
+               proc_create_single_data("mb_last_start", S_IRUGO, sbi->s_proc,
+                               ext4_mb_seq_last_start_seq_show, sb);
++              proc_create_data("mb_alloc_stats", S_IFREG | S_IRUGO | S_IWUSR,
++                               sbi->s_proc, &ext4_mb_seq_alloc_fops, sb);
+       }
+       return 0;
+ }
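
The allocator change above skips the more expensive criteria when free space is scarce: with the default thresholds of 25/15/5 percent (MB_DEFAULT_C1..C3_THRESHOLD, tunable via the mb_c1_threshold..mb_c3_threshold sysfs files), a nearly-full filesystem starts directly at cr 3. A userspace sketch of the shortcut, assuming the default percentages:

#include <stdio.h>

static int starting_cr(unsigned long long total_blocks,
		       unsigned long long avail_blocks, int cr)
{
	unsigned long long c1 = total_blocks * 25 / 100;
	unsigned long long c2 = total_blocks * 15 / 100;
	unsigned long long c3 = total_blocks * 5 / 100;

	if (avail_blocks < c3)
		return 3;	/* nearly full: accept anything */
	if (avail_blocks < c2)
		return 2;
	if (avail_blocks < c1)
		return 1;
	return cr;		/* plenty of space: keep the requested pass */
}

int main(void)
{
	printf("10%% free -> start at cr %d\n", starting_cr(1000, 100, 0));
	printf("50%% free -> start at cr %d\n", starting_cr(1000, 500, 0));
	return 0;
}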
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-5.8.0-ml.series b/ldiskfs/kernel_patches/series/ldiskfs-5.8.0-ml.series
new file mode 100644 (file)
index 0000000..c539481
--- /dev/null
@@ -0,0 +1,28 @@
+rhel8/ext4-inode-version.patch
+linux-5.4/ext4-lookup-dotdot.patch
+suse15/ext4-print-inum-in-htree-warning.patch
+linux-5.8/ext4-prealloc.patch
+ubuntu18/ext4-osd-iop-common.patch
+linux-5.8/ext4-misc.patch
+linux-5.8/ext4-mballoc-extra-checks.patch
+linux-5.4/ext4-hash-indexed-dir-dotdot-update.patch
+linux-5.8/ext4-kill-dx-root.patch
+linux-5.8/ext4-mballoc-pa-free-mismatch.patch
+linux-5.8/ext4-data-in-dirent.patch
+rhel8/ext4-nocmtime.patch
+base/ext4-htree-lock.patch
+linux-5.8/ext4-pdirop.patch
+linux-5.8/ext4-max-dir-size.patch
+linux-5.8/ext4-corrupted-inode-block-bitmaps-handling-patches.patch
+linux-5.4/ext4-give-warning-with-dir-htree-growing.patch
+ubuntu18/ext4-jcb-optimization.patch
+linux-5.4/ext4-attach-jinode-in-writepages.patch
+rhel8/ext4-dont-check-before-replay.patch
+rhel7.6/ext4-use-GFP_NOFS-in-ext4_inode_attach_jinode.patch
+rhel7.6/ext4-export-orphan-add.patch
+linux-5.8/ext4-export-mb-stream-allocator-variables.patch
+ubuntu19/ext4-iget-with-flags.patch
+linux-5.4/export-ext4fs-dirhash-helper.patch
+linux-5.4/ext4-misc.patch
+linux-5.8/ext4-simple-blockalloc.patch
+linux-5.8/ext4-no-max-dir-size-limit-for-iam-objects.patch
index e6bfe10..6ff7aa3 100644 (file)
@@ -637,7 +637,7 @@ rhashtable_walk_init, [
        AC_DEFINE(HAVE_3ARG_RHASHTABLE_WALK_INIT, 1,
                [rhashtable_walk_init() has 3 args])
 ])
-]) # LIBCFS_RHASHTABLE_REPLACE
+]) # LIBCFS_RHASHTABLE_WALK_INIT_3ARG
 
 #
 # Kernel version 4.8-rc6 commit ca26893f05e86497a86732768ec53cd38c0819ca
@@ -713,7 +713,7 @@ rhashtable_walk_enter, [
        AC_DEFINE(HAVE_RHASHTABLE_WALK_ENTER, 1,
                [rhashtable_walk_enter() is available])
 ])
-]) # LIBCFS_RHASHTABLE_REPLACE
+]) # LIBCFS_RHASHTABLE_WALK_ENTER
 
 #
 # Kernel version 4.9 commit 768ae309a96103ed02eb1e111e838c87854d8b51
@@ -804,7 +804,7 @@ kref_read, [
        AC_DEFINE(HAVE_KREF_READ, 1,
                [kref_read() is available])
 ])
-]) LIBCFS_KREF_READ
+]) # LIBCFS_KREF_READ
 
 #
 # Kernel version 4.11-rc1 commit da20420f83ea0fbcf3d03afda08d971ea1d8a356
@@ -985,7 +985,7 @@ EXTRA_KCFLAGS="$tmp_flags"
 ]) # LIBCFS_MM_TOTALRAM_PAGES_FUNC
 
 #
-# LIBCFS_NEW_KERNEL_WRITE
+# LIBCFS_NEW_KERNEL_READ
 #
 # 4.14 commit bdd1d2d3d251c65b74ac4493e08db18971c09240 changed
 # the signature of kernel_read to match other read/write helpers
@@ -1149,6 +1149,46 @@ clear_and_wake_up_bit, [
 ]) # LIBCFS_CLEAR_AND_WAKE_UP_BIT
 
 #
+# LIBCFS_TCP_SOCK_SET_NODELAY
+#
+# kernel 4.18.0-293.el8
+# tcp_sock_set_nodelay() was added
+AC_DEFUN([LIBCFS_TCP_SOCK_SET_NODELAY], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if 'tcp_sock_set_nodelay()' exists],
+tcp_sock_set_nodelay_exists, [
+       #include <linux/tcp.h>
+],[
+       tcp_sock_set_nodelay(NULL);
+],[
+       AC_DEFINE(HAVE_TCP_SOCK_SET_NODELAY, 1,
+               ['tcp_sock_set_nodelay()' exists])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LIBCFS_TCP_SOCK_SET_NODELAY
+
+#
+# LIBCFS_TCP_SOCK_SET_KEEPIDLE
+#
+# kernel 4.18.0-293.el8
+# tcp_sock_set_keepidle() was added
+AC_DEFUN([LIBCFS_TCP_SOCK_SET_KEEPIDLE], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if 'tcp_sock_set_keepidle()' exists],
+tcp_sock_set_keepidle_exists, [
+       #include <linux/tcp.h>
+],[
+       tcp_sock_set_keepidle(NULL, 0);
+],[
+       AC_DEFINE(HAVE_TCP_SOCK_SET_KEEPIDLE, 1,
+               ['tcp_sock_set_keepidle()' exists])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LIBCFS_TCP_SOCK_SET_KEEPIDLE
+
+#
 # LIBCFS_XARRAY_SUPPORT
 #
 # 4.19-rc5 kernel commit 3159f943aafdbacb2f94c38fdaadabf2bbde2a14
@@ -1301,8 +1341,6 @@ LB_CHECK_EXPORT([kallsyms_lookup_name], [kernel/kallsyms.c],
 ]) # LIBCFS_KALLSYMS_LOOKUP
 
 #
-# LIBCFS_HAVE_PROC_OPS
-#
 # v5.5-8862-gd56c0d45f0e2
 # proc: decouple proc from VFS with "struct proc_ops"
 #
@@ -1314,7 +1352,7 @@ AC_DEFUN([LIBCFS_SRC_HAVE_PROC_OPS], [
        ],[
                my_proc->proc_lseek = NULL;
        ],[-Werror])
-])
+]) # LIBCFS_SRC_HAVE_PROC_OPS
 AC_DEFUN([LIBCFS_HAVE_PROC_OPS], [
        AC_MSG_CHECKING([if struct proc_ops exists])
        LB2_LINUX_TEST_RESULT([proc_ops], [
@@ -1405,6 +1443,23 @@ EXTRA_KCFLAGS="$tmp_flags"
 ]) # LIBCFS_KERNEL_SETSOCKOPT
 
 #
+# LIBCFS_SEC_RELEASE_SECCTX
+#
+# kernel linux-hwe-5.8 (5.8.0-22.23~20.04.1)
+# LSM: Use lsmcontext in security_release_secctx
+AC_DEFUN([LIBCFS_SEC_RELEASE_SECCTX], [
+LB_CHECK_COMPILE([if security_release_secctx has 1 arg],
+security_release_secctx_1arg, [
+       #include <linux/security.h>
+],[
+       security_release_secctx(NULL);
+],[
+       AC_DEFINE(HAVE_SEC_RELEASE_SECCTX_1ARG, 1,
+               [security_release_secctx has 1 arg.])
+])
+]) # LIBCFS_SEC_RELEASE_SECCTX
+
+#
 # LIBCFS_HAVE_KFREE_SENSITIVE
 #
 # kernel v5.10-rc1~3
@@ -1422,7 +1477,7 @@ kfree_sensitive_exists, [
                [kfree_sensitive() is available.])
 ])
 EXTRA_KCFLAGS="$tmp_flags"
-]) # LIBCFS_HAVE_NR_UNSTABLE_NFS
+]) # LIBCFS_HAVE_KFREE_SENSITIVE
 
 AC_DEFUN([LIBCFS_PROG_LINUX_SRC], [
        LIBCFS_SRC_HAVE_PROC_OPS
@@ -1529,6 +1584,9 @@ LIBCFS_WAIT_VAR_EVENT
 # 4.17
 LIBCFS_BITMAP_ALLOC
 LIBCFS_CLEAR_AND_WAKE_UP_BIT
+# 4.18
+LIBCFS_TCP_SOCK_SET_NODELAY
+LIBCFS_TCP_SOCK_SET_KEEPIDLE
 # 4.19
 LIBCFS_XARRAY_SUPPORT
 # 4.20
@@ -1547,6 +1605,7 @@ LIBCFS_KALLSYMS_LOOKUP
 LIBCFS_HAVE_MMAP_LOCK
 LIBCFS_KERNEL_SETSOCKOPT
 LIBCFS_VMALLOC_2ARGS
+LIBCFS_SEC_RELEASE_SECCTX
 # 5.10
 LIBCFS_HAVE_KFREE_SENSITIVE
 ]) # LIBCFS_PROG_LINUX
index 41484bd..98951f7 100644 (file)
@@ -35,6 +35,7 @@ static inline void tcp_sock_set_quickack(struct sock *sk, int opt)
                          (char *)&opt, sizeof(opt));
 }
 
+#if !defined(HAVE_TCP_SOCK_SET_NODELAY)
 static inline void tcp_sock_set_nodelay(struct sock *sk)
 {
        int opt = 1;
@@ -43,7 +44,9 @@ static inline void tcp_sock_set_nodelay(struct sock *sk)
        kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
                          (char *)&opt, sizeof(opt));
 }
+#endif /* HAVE_TCP_SOCK_SET_NODELAY */
 
+#if !defined(HAVE_TCP_SOCK_SET_KEEPIDLE)
 static inline int tcp_sock_set_keepidle(struct sock *sk, int opt)
 {
        struct socket *sock = sk->sk_socket;
@@ -51,6 +54,7 @@ static inline int tcp_sock_set_keepidle(struct sock *sk, int opt)
        return kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
                                 (char *)&opt, sizeof(opt));
 }
+#endif /* HAVE_TCP_SOCK_SET_KEEPIDLE */
 
 static inline int tcp_sock_set_keepintvl(struct sock *sk, int opt)
 {
index fa32e37..5549417 100644 (file)
@@ -48,14 +48,14 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
        int newmask = minmask, i, len, found = 0;
 
        ENTRY;
-       /* <str> must be a list of tokens separated by whitespace
+       /* <str> must be a list of tokens separated by whitespace or comma,
         * and optionally an operator ('+' or '-').  If an operator
         * appears first in <str>, '*oldmask' is used as the starting point
         * (relative), otherwise minmask is used (absolute).  An operator
         * applies to all following tokens up to the next operator.
         */
        while (*str != 0) {
-               while (isspace(*str))
+               while (isspace(*str) || *str == ',')
                        str++;
                if (*str == 0)
                        break;
@@ -72,7 +72,8 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 
                /* find token length */
                for (len = 0; str[len] != 0 && !isspace(str[len]) &&
-                    str[len] != '+' && str[len] != '-'; len++);
+                       str[len] != '+' && str[len] != '-' && str[len] != ',';
+                    len++);
 
                /* match token */
                found = 0;
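
With commas accepted as separators, the two mask strings below now parse identically. A hedged usage sketch; my_bit2str() is an illustrative lookup, and the trailing arguments assume the usual cfs_str2mask() signature of old-mask pointer plus min/all mask bounds:

static const char *my_bit2str(int bit)	/* illustrative lookup table */
{
	static const char *names[] = { "net", "malloc", "trace" };

	return bit >= 0 && bit < 3 ? names[bit] : NULL;
}

static int set_mask_example(int *mask)
{
	int rc;

	rc = cfs_str2mask("net malloc +trace", my_bit2str, mask, 0, ~0);
	if (rc)
		return rc;
	/* equivalent after this patch: */
	return cfs_str2mask("net,malloc,+trace", my_bit2str, mask, 0, ~0);
}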
index 5b21ef7..6f19bca 100644 (file)
@@ -116,6 +116,9 @@ static struct shash_alg alg = {
                .cra_name               = "adler32",
                .cra_driver_name        = "adler32-zlib",
                .cra_priority           = 100,
+#ifdef CRYPTO_ALG_OPTIONAL_KEY
+               .cra_flags              = CRYPTO_ALG_OPTIONAL_KEY,
+#endif
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(u32),
                .cra_module             = NULL,
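
The conditional flag matters on kernels (v4.16+) whose crypto core refuses keyless use of hashes that implement setkey: without CRYPTO_ALG_OPTIONAL_KEY, digesting with adler32-zlib before a crypto_shash_setkey() call fails with -ENOKEY. A sketch of the allocation path the flag keeps working; adler32_smoke_test() is a hypothetical helper:

#include <crypto/hash.h>

static int adler32_smoke_test(void)	/* hypothetical */
{
	struct crypto_shash *tfm;

	tfm = crypto_alloc_shash("adler32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* with CRYPTO_ALG_OPTIONAL_KEY set, digests may run without
	 * a prior crypto_shash_setkey() */
	crypto_free_shash(tfm);
	return 0;
}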
index ed52e0a..87867ad 100644 (file)
@@ -652,6 +652,8 @@ struct lnet_peer {
 
        /* source NID to use during discovery */
        lnet_nid_t              lp_disc_src_nid;
+       /* destination NID to use during discovery */
+       lnet_nid_t              lp_disc_dst_nid;
 
        /* net to perform discovery on */
        __u32                   lp_disc_net_id;
index 6481457..1017dd3 100644 (file)
@@ -191,6 +191,8 @@ struct lnet_ioctl_local_ni_hstats {
        __u32 hlni_local_timeout;
        __u32 hlni_local_error;
        __s32 hlni_health_value;
+       __u32 hlni_ping_count;
+       __u64 hlni_next_ping;
 };
 
 struct lnet_ioctl_peer_ni_hstats {
@@ -199,6 +201,8 @@ struct lnet_ioctl_peer_ni_hstats {
        __u32 hlpni_remote_error;
        __u32 hlpni_network_timeout;
        __s32 hlpni_health_value;
+       __u32 hlpni_ping_count;
+       __u64 hlpni_next_ping;
 };
 
 struct lnet_ioctl_element_msg_stats {
index c5da2ee..bfe044c 100644 (file)
@@ -87,16 +87,13 @@ kgnilnd_start_sd_threads(void)
 int
 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
 {
-       kgn_conn_t         *conn;
-       struct list_head   *ctmp, *cnxt;
+       kgn_conn_t *conn, *cnxt;
        int                 loopback;
        int                 count = 0;
 
        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
 
-       list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
-               conn = list_entry(ctmp, kgn_conn_t, gnc_list);
-
+       list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;
 
@@ -161,14 +158,12 @@ int
 kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
 {
        kgn_conn_t       *conn;
-       struct list_head *tmp;
        int               loopback;
        ENTRY;
 
        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
 
-       list_for_each(tmp, &peer->gnp_conns) {
-               conn = list_entry(tmp, kgn_conn_t, gnc_list);
+       list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
@@ -560,7 +555,7 @@ kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
             */
 
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
-                       list_for_each_entry(net , &kgnilnd_data.kgn_nets[i], gnn_list) {
+                       list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown set for any net shutdown is in progress just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
@@ -1322,7 +1317,6 @@ kgnilnd_get_peer_info(int index,
                      lnet_nid_t *id, __u32 *nic_addr,
                      int *refcount, int *connecting)
 {
-       struct list_head  *ptmp;
        kgn_peer_t        *peer;
        int               i;
        int               rc = -ENOENT;
@@ -1330,10 +1324,7 @@ kgnilnd_get_peer_info(int index,
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
 
        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
-
-               list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
-                       peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+               list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
                        if (index-- > 0)
                                continue;
 
@@ -1509,8 +1500,7 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
 {
        LIST_HEAD               (souls);
        LIST_HEAD               (zombies);
-       struct list_head        *ptmp, *pnxt;
-       kgn_peer_t              *peer;
+       kgn_peer_t *peer, *pnxt;
        int                     lo;
        int                     hi;
        int                     i;
@@ -1528,9 +1518,8 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
        }
 
        for (i = lo; i <= hi; i++) {
-               list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
-                       peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+               list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
+                                        gnp_list) {
                        LASSERTF(peer->gnp_net != NULL,
                                "peer %p (%s) with NULL net\n",
                                 peer, libcfs_nid2str(peer->gnp_nid));
@@ -1608,21 +1597,14 @@ kgn_conn_t *
 kgnilnd_get_conn_by_idx(int index)
 {
        kgn_peer_t        *peer;
-       struct list_head  *ptmp;
        kgn_conn_t        *conn;
-       struct list_head  *ctmp;
        int                i;
 
 
        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                read_lock(&kgnilnd_data.kgn_peer_conn_lock);
-               list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
-
-                       peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
-                       list_for_each(ctmp, &peer->gnp_conns) {
-                               conn = list_entry(ctmp, kgn_conn_t, gnc_list);
-
+               list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
+                       list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                                        continue;
 
diff --git a/lnet/klnds/gnilnd/gnilnd_proc.c b/lnet/klnds/gnilnd/gnilnd_proc.c
index ab2eda9..0cdc6aa 100644
@@ -754,7 +754,8 @@ typedef struct {
 int
 kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
 {
-       struct list_head       *list, *tmp;
+       struct list_head       *list;
+       kgn_conn_t *conn;
        loff_t                  here = 0;
        int                     rc = 0;
 
@@ -793,10 +794,8 @@ kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
 
 start_list:
 
-       list_for_each(tmp, list) {
+       list_for_each_entry(conn, list, gnc_hashlist) {
                if (here == off) {
-                       kgn_conn_t *conn;
-                       conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
                        gseq->gconn_conn = conn;
                        rc = 0;
                        goto out;
@@ -1107,7 +1106,8 @@ typedef struct {
 int
 kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
 {
-       struct list_head       *list, *tmp;
+       struct list_head *list;
+       kgn_peer_t *peer;
        loff_t                  here = 0;
        int                     rc = 0;
 
@@ -1146,10 +1146,8 @@ kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
 
 start_list:
 
-       list_for_each(tmp, list) {
+       list_for_each_entry(peer, list, gnp_list) {
                if (here == off) {
-                       kgn_peer_t *peer;
-                       peer = list_entry(tmp, kgn_peer_t, gnp_list);
                        gseq->gpeer_peer = peer;
                        rc = 0;
                        goto out;
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index ae84f06..b28c5b2 100644
@@ -525,7 +525,6 @@ kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
 {
        struct kib_peer_ni *peer_ni;
        struct kib_conn *conn;
-       struct list_head *ctmp;
        int i;
        unsigned long flags;
 
@@ -537,11 +536,11 @@ kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
                if (peer_ni->ibp_ni != ni)
                        continue;
 
-               list_for_each(ctmp, &peer_ni->ibp_conns) {
+               list_for_each_entry(conn, &peer_ni->ibp_conns,
+                                   ibc_list) {
                        if (index-- > 0)
                                continue;
 
-                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
                        kiblnd_conn_addref(conn);
                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
@@ -2023,7 +2022,7 @@ kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
        spin_lock(&ps->ps_lock);
        while ((po = list_first_entry_or_null(&ps->ps_pool_list,
                                              struct kib_pool,
-                                             po_list)) == NULL) {
+                                             po_list)) != NULL) {
                po->po_failed = 1;
                if (po->po_allocated == 0)
                        list_move(&po->po_list, zombies);
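
One behavioural fix hides among the cleanups just above: kiblnd_fail_poolset() tested the pop-from-list result with "== NULL", which never runs the body for a non-empty pool list and hands the body a NULL pointer for an empty one, so failed pools were never moved to the zombie list; "!= NULL" restores the intended drain loop. A small stand-alone sketch of that pattern (hypothetical types, not the kib structures):

#include <stdio.h>

struct pool { int id; struct pool *next; };

/* stands in for list_first_entry_or_null(): NULL once the list is empty */
static struct pool *pop_first(struct pool **head)
{
	struct pool *p = *head;

	if (p)
		*head = p->next;
	return p;
}

int main(void)
{
	struct pool a = { 1, NULL }, b = { 2, &a };
	struct pool *head = &b, *po;

	/* "!= NULL" drains the list until pop_first() runs out */
	while ((po = pop_first(&head)) != NULL)
		printf("failing pool %d\n", po->id);
	return 0;
}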
diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 4e79dfd..9fd0baa 100644
@@ -242,11 +242,9 @@ out:
 static struct kib_tx *
 kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
 {
-       struct list_head *tmp;
-
-       list_for_each(tmp, &conn->ibc_active_txs) {
-               struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+       struct kib_tx *tx;
 
+       list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
 
@@ -3308,11 +3306,8 @@ static int
 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
 {
        struct kib_tx *tx;
-       struct list_head *ttmp;
-
-       list_for_each(ttmp, txs) {
-               tx = list_entry(ttmp, struct kib_tx, tx_list);
 
+       list_for_each_entry(tx, txs, tx_list) {
                if (txs != &conn->ibc_active_txs) {
                        LASSERT(tx->tx_queued);
                } else {
@@ -3354,7 +3349,6 @@ kiblnd_check_conns (int idx)
        struct kib_peer_ni *peer_ni;
        struct kib_conn *conn;
        struct kib_tx *tx, *tx_tmp;
-       struct list_head *ctmp;
        unsigned long flags;
 
        /* NB. We expect to have a look at all the peers and not find any
@@ -3375,12 +3369,10 @@ kiblnd_check_conns (int idx)
                        }
                }
 
-               list_for_each(ctmp, &peer_ni->ibp_conns) {
+               list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
                        int timedout;
                        int sendnoop;
 
-                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
                        LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
                        spin_lock(&conn->ibc_lock);
diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 35d12d9..d9c428a 100644
@@ -486,7 +486,6 @@ static void
 ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
                            struct ksock_conn_cb *conn_cb)
 {
-       struct list_head *tmp;
        struct ksock_conn *conn;
        struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
 
@@ -506,9 +505,7 @@ ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
        /* peer_ni's route list takes over my ref on 'route' */
        peer_ni->ksnp_conn_cb = conn_cb;
 
-       list_for_each(tmp, &peer_ni->ksnp_conns) {
-               conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+       list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
                if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
                                  (struct sockaddr *)&conn_cb->ksnr_addr))
                        continue;
@@ -688,7 +685,6 @@ ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
 {
        struct ksock_peer_ni *peer_ni;
        struct ksock_conn *conn;
-       struct list_head *ctmp;
        int i;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
@@ -699,12 +695,11 @@ ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
                if (peer_ni->ksnp_ni != ni)
                        continue;
 
-               list_for_each(ctmp, &peer_ni->ksnp_conns) {
+               list_for_each_entry(conn, &peer_ni->ksnp_conns,
+                                   ksnc_list) {
                        if (index-- > 0)
                                continue;
 
-                       conn = list_entry(ctmp, struct ksock_conn,
-                                         ksnc_list);
                        ksocknal_conn_addref(conn);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        return conn;
@@ -743,7 +738,10 @@ ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
        struct sockaddr_storage peer;
 
        rc = lnet_sock_getaddr(sock, true, &peer);
-       LASSERT(rc == 0);               /* we succeeded before */
+       if (rc != 0) {
+               CERROR("Can't determine new connection's address\n");
+               return rc;
+       }
 
        LIBCFS_ALLOC(cr, sizeof(*cr));
        if (cr == NULL) {
@@ -782,7 +780,6 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        LIST_HEAD(zombies);
        struct lnet_process_id peerid;
-       struct list_head *tmp;
        u64 incarnation;
        struct ksock_conn *conn;
        struct ksock_conn *conn2;
@@ -980,9 +977,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
         * loopback connection */
        if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
                          (struct sockaddr *)&conn->ksnc_myaddr)) {
-               list_for_each(tmp, &peer_ni->ksnp_conns) {
-                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+               list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
                        if (!rpc_cmp_addr(
                                    (struct sockaddr *)&conn2->ksnc_peeraddr,
                                    (struct sockaddr *)&conn->ksnc_peeraddr) ||
@@ -1200,7 +1195,6 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_conn_cb *conn_cb;
        struct ksock_conn *conn2;
-       struct list_head *tmp;
 
        LASSERT(peer_ni->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
@@ -1222,19 +1216,13 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
                        LASSERT((conn_cb->ksnr_connected &
                                BIT(conn->ksnc_type)) != 0);
 
-               conn2 = NULL;
-               list_for_each(tmp, &peer_ni->ksnp_conns) {
-                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+               list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
                        if (conn2->ksnc_conn_cb == conn_cb &&
                            conn2->ksnc_type == conn->ksnc_type)
-                               break;
-
-                       conn2 = NULL;
+                               goto conn2_found;
                }
-               if (conn2 == NULL)
-                       conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
-
+               conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
+conn2_found:
                conn->ksnc_conn_cb = NULL;
 
                /* drop conn's ref on conn_cb */
@@ -1323,7 +1311,8 @@ ksocknal_finalize_zcreq(struct ksock_conn *conn)
 
        spin_lock(&peer_ni->ksnp_lock);
 
-       list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+       list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+                                tx_zc_list) {
                if (tx->tx_conn != conn)
                        continue;
 
@@ -1591,7 +1580,6 @@ ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
 {
        int index;
        int i;
-       struct list_head *tmp;
        struct ksock_conn *conn;
 
         for (index = 0; ; index++) {
@@ -1600,10 +1588,8 @@ ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
                 i = 0;
                 conn = NULL;
 
-               list_for_each(tmp, &peer_ni->ksnp_conns) {
+               list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
                         if (i++ == index) {
-                               conn = list_entry(tmp, struct ksock_conn,
-                                                 ksnc_list);
                                 ksocknal_conn_addref(conn);
                                 break;
                         }
@@ -1611,7 +1597,7 @@ ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
 
                read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                if (conn == NULL)
+               if (i <= index)
                         break;
 
                 ksocknal_lib_push_conn (conn);
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index c0d626a..24889b6 100644
@@ -674,16 +674,14 @@ ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
 struct ksock_conn *
 ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
 {
-       struct list_head *tmp;
+       struct ksock_conn *c;
        struct ksock_conn *conn;
        struct ksock_conn *typed = NULL;
        struct ksock_conn *fallback = NULL;
        int tnob = 0;
        int fnob = 0;
 
-       list_for_each(tmp, &peer_ni->ksnp_conns) {
-               struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
-                                                 ksnc_list);
+       list_for_each_entry(c, &peer_ni->ksnp_conns, ksnc_list) {
                int nob = atomic_read(&c->ksnc_tx_nob) +
                          c->ksnc_sock->sk->sk_wmem_queued;
                int rc;
@@ -2303,14 +2301,11 @@ ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
 {
         /* We're called with a shared lock on ksnd_global_lock */
        struct ksock_conn *conn;
-       struct list_head *ctmp;
        struct ksock_tx *tx;
 
-       list_for_each(ctmp, &peer_ni->ksnp_conns) {
+       list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
                int error;
 
-               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
                 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                 LASSERT (!conn->ksnc_closing);
 
diff --git a/lnet/klnds/socklnd/socklnd_proto.c b/lnet/klnds/socklnd/socklnd_proto.c
index 49267c0..533e8bb 100644
@@ -63,8 +63,7 @@ ksocknal_next_tx_carrier(struct ksock_conn *conn)
                 /* no more packets queued */
                 conn->ksnc_tx_carrier = NULL;
         } else {
-               conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
-                                                  struct ksock_tx, tx_list);
+               conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
                LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
                        tx->tx_msg.ksm_type);
         }
@@ -422,8 +421,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
 
        spin_lock(&peer_ni->ksnp_lock);
 
-       list_for_each_entry_safe(tx, tmp,
-                                     &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+       list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+                                tx_zc_list) {
                 __u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
                 if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
diff --git a/lnet/lnet/acceptor.c b/lnet/lnet/acceptor.c
index aa8d2e3..f287ae6 100644
@@ -205,7 +205,10 @@ lnet_accept(struct socket *sock, __u32 magic)
        LASSERT(sizeof(cr) <= 16);              /* not too big for the stack */
 
        rc = lnet_sock_getaddr(sock, true, &peer);
-       LASSERT(rc == 0);                       /* we succeeded before */
+       if (rc != 0) {
+               CERROR("Can't determine new connection's address\n");
+               return rc;
+       }
 
        if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
 
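The same hardening appears twice, in ksocknal_accept() above and in lnet_accept() here: an LASSERT that the address lookup "succeeded before" becomes a real error path, since the peer can legitimately disconnect between accept and the lookup. A user-space analogue of the shape of the fix (illustrative only, using getpeername() rather than the LNet socket helpers):

#include <stdio.h>
#include <sys/socket.h>

/* A runtime-reachable failure is reported and propagated to the
 * caller, which drops the connection, instead of being asserted away. */
static int accept_get_peer(int fd, struct sockaddr_storage *peer)
{
	socklen_t len = sizeof(*peer);

	if (getpeername(fd, (struct sockaddr *)peer, &len) != 0) {
		perror("Can't determine new connection's address");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct sockaddr_storage peer;

	return accept_get_peer(-1, &peer) ? 1 : 0; /* fails: bad fd */
}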
diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c
index 9ea4719..75cad7e 100644
@@ -3702,6 +3702,8 @@ lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
        stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
        stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
        stats->hlni_health_value = atomic_read(&ni->ni_healthv);
+       stats->hlni_ping_count = ni->ni_ping_count;
+       stats->hlni_next_ping = ni->ni_next_ping;
 
 unlock:
        lnet_net_unlock(cpt);
diff --git a/lnet/lnet/config.c b/lnet/lnet/config.c
index 1c9bd4f..f6ade39 100644
@@ -376,37 +376,32 @@ lnet_net_alloc(__u32 net_id, struct list_head *net_list)
 static int
 lnet_ni_add_interface(struct lnet_ni *ni, char *iface)
 {
-       int niface = 0;
+       size_t iface_len = strlen(iface) + 1;
 
        if (ni == NULL)
                return -ENOMEM;
 
-       /* Allocate a separate piece of memory and copy
-        * into it the string, so we don't have
-        * a depencency on the tokens string.  This way we
-        * can free the tokens at the end of the function.
-        * The newly allocated ni_interface can be
-        * freed when freeing the NI */
-       if (ni->ni_interface != NULL)
-               niface++;
-
-       if (niface >= 1) {
-               LCONSOLE_ERROR_MSG(0x115, "Too many interfaces "
-                                  "for net %s\n",
-                                  libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
+       if (ni->ni_interface != NULL) {
+               LCONSOLE_ERROR_MSG(0x115, "%s: interface %s already set for net %s: rc = %d\n",
+                                  iface, ni->ni_interface,
+                                  libcfs_net2str(LNET_NIDNET(ni->ni_nid)),
+                                  -EINVAL);
                return -EINVAL;
        }
 
-       LIBCFS_ALLOC(ni->ni_interface,
-                    strlen(iface) + 1);
+       /* Allocate memory for the interface, so the code parsing input into
+        * tokens and adding interfaces can free the input safely.
+        * ni->ni_interface is freed in lnet_ni_free().
+        */
+       LIBCFS_ALLOC(ni->ni_interface, iface_len);
 
        if (ni->ni_interface == NULL) {
-               CERROR("Can't allocate net interface name\n");
+               CERROR("%s: cannot allocate net interface name: rc = %d\n",
+                       iface, -ENOMEM);
                return -ENOMEM;
        }
 
-       strncpy(ni->ni_interface, iface,
-               strlen(iface) + 1);
+       strscpy(ni->ni_interface, iface, iface_len);
 
        return 0;
 }
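
Besides the clearer duplicate-interface error, lnet_ni_add_interface() now copies with strscpy() instead of strncpy(), guaranteeing NUL termination and making truncation detectable. An approximate user-space model of the contract relied on (strscpy() is a kernel helper; -1 stands in for -E2BIG here):

#include <string.h>

/* copies at most size-1 bytes, always NUL-terminates, returns the
 * copied length or -1 if src did not fit (assumed approximation) */
static long sketch_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;
	len = strnlen(src, size - 1);
	memcpy(dst, src, len);
	dst[len] = '\0';
	return src[len] != '\0' ? -1 : (long)len;
}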
diff --git a/lnet/lnet/lib-socket.c b/lnet/lnet/lib-socket.c
index 4a86114..d443ae4 100644
@@ -343,7 +343,6 @@ struct socket *
 lnet_sock_listen(int local_port, int backlog, struct net *ns)
 {
        struct socket *sock;
-       mm_segment_t oldfs;
        int val = 0;
        int rc;
 
@@ -360,11 +359,34 @@ lnet_sock_listen(int local_port, int backlog, struct net *ns)
         * This is the default, but it can be overridden so
         * we force it back.
         */
-       oldfs = get_fs();
-       set_fs(KERNEL_DS);
-       sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
-                             (char __user __force *) &val, sizeof(val));
-       set_fs(oldfs);
+#ifdef HAVE_KERNEL_SETSOCKOPT
+       kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+                         (char *) &val, sizeof(val));
+#elif defined(_LINUX_SOCKPTR_H)
+       /* sockptr_t was introduced around v5.8-rc4-1952-ga7b75c5a8c41
+        * and allows a kernel address to be passed to ->setsockopt
+        */
+       if (ipv6_only_sock(sock->sk)) {
+               sockptr_t optval = KERNEL_SOCKPTR(&val);
+               sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+                                     optval, sizeof(val));
+       }
+#else
+       /* From v5.7-rc6-2614-g5a892ff2facb when kernel_setsockopt()
+        * was removed until sockptr_t (above) there is no clean
+        * way to pass kernel address to setsockopt.  We could use
+        * get_fs()/set_fs(), but in this particular situation there
+        * is an easier way.
+        * It depends on the fact that at least for these few kernels
+        * a NULL address to ipv6_setsockopt() is treated like the address
+        * of a zero.
+        */
+       if (ipv6_only_sock(sock->sk) && !val) {
+               void *optval = NULL;
+               sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+                               optval, sizeof(val));
+       }
+#endif
 
        rc = kernel_listen(sock, backlog);
        if (rc == 0)
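
The lnet_sock_listen() change retires the get_fs()/set_fs(KERNEL_DS) trick in favour of three compile-time paths for handing a kernel buffer to ->setsockopt(). Isolated, the sockptr_t path looks like this (a sketch for v5.8+ kernels, assuming the same headers this file already includes):

/* KERNEL_SOCKPTR() tags a kernel address so ->setsockopt() can consume
 * it directly, with no address-limit override */
static int sock_disable_v6only(struct socket *sock)
{
	int val = 0;
	sockptr_t optval = KERNEL_SOCKPTR(&val);

	return sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
				     optval, sizeof(val));
}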
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index d70415e..709fe24 100644
@@ -263,6 +263,7 @@ lnet_peer_alloc(lnet_nid_t nid)
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;
        lp->lp_disc_src_nid = LNET_NID_ANY;
+       lp->lp_disc_dst_nid = LNET_NID_ANY;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
@@ -2531,6 +2532,7 @@ lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
        spin_lock(&lp->lp_lock);
 
        lp->lp_disc_src_nid = ev->target.nid;
+       lp->lp_disc_dst_nid = ev->source.nid;
 
        /*
         * If some kind of error happened the contents of message
@@ -2563,20 +2565,41 @@ lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
                goto out;
        }
 
-
        /*
         * The peer may have discovery disabled at its end. Set
         * NO_DISCOVERY as appropriate.
         */
-       if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
-           !lnet_peer_discovery_disabled) {
-               CDEBUG(D_NET, "Peer %s has discovery enabled\n",
-                      libcfs_nid2str(lp->lp_primary_nid));
-               lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
-       } else {
+       if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
+           lnet_peer_discovery_disabled) {
                CDEBUG(D_NET, "Peer %s has discovery disabled\n",
                       libcfs_nid2str(lp->lp_primary_nid));
+
+               /* Detect whether this peer has toggled discovery from on to
+                * off and whether we can delete and re-create the peer. Peers
+                * that were manually configured cannot be deleted by discovery.
+                * We need to delete this peer and re-create it if the peer was
+                * not configured manually, is currently considered DD capable,
+                * and either:
+                * 1. We've already discovered the peer (the peer has toggled
+                *    the discovery feature from on to off), or
+                * 2. The peer is considered MR, but it was not user configured
+                *    (this was a "temporary" peer created via the kernel APIs
+                *     that we're discovering for the first time)
+                */
+               if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
+                                     LNET_PEER_NO_DISCOVERY)) &&
+                   (lp->lp_state & (LNET_PEER_DISCOVERED |
+                                    LNET_PEER_MULTI_RAIL))) {
+                       CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
+                              libcfs_nid2str(lp->lp_primary_nid),
+                              lp->lp_state);
+                       lp->lp_state |= LNET_PEER_MARK_DELETION;
+               }
                lp->lp_state |= LNET_PEER_NO_DISCOVERY;
+       } else {
+               CDEBUG(D_NET, "Peer %s has discovery enabled\n",
+                      libcfs_nid2str(lp->lp_primary_nid));
+               lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
        }
 
        /*
@@ -2784,7 +2807,8 @@ static void lnet_discovery_event_handler(struct lnet_event *event)
 
        /* put peer back at end of request queue, if discovery not already
         * done */
-       if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
+       if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
+           lnet_peer_queue_for_discovery(lp)) {
                list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
                wake_up(&the_lnet.ln_dc_waitq);
        }
@@ -3097,7 +3121,7 @@ __must_hold(&lp->lp_lock)
         * of deleting it.
         */
        if (!list_empty(&lp->lp_dc_list))
-               list_del(&lp->lp_dc_list);
+               list_del_init(&lp->lp_dc_list);
        list_for_each_entry_safe(route, tmp,
                                 &lp->lp_routes,
                                 lr_gwlist)
@@ -3237,8 +3261,10 @@ __must_hold(&lp->lp_lock)
                         * received by lp, we need to set the discovery source
                         * NID for new_lp to the NID stored in lp.
                         */
-                       if (lp->lp_disc_src_nid != LNET_NID_ANY)
+                       if (lp->lp_disc_src_nid != LNET_NID_ANY) {
                                new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
+                               new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
+                       }
                        spin_unlock(&new_lp->lp_lock);
                        spin_unlock(&lp->lp_lock);
 
@@ -3288,41 +3314,10 @@ __must_hold(&lp->lp_lock)
        return rc ? rc : LNET_REDISCOVER_PEER;
 }
 
-/*
- * Select NID to send a Ping or Push to.
- */
-static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
-{
-       struct lnet_peer_ni *lpni;
-
-       /* Look for a direct-connected NID for this peer. */
-       lpni = NULL;
-       while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
-               if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
-                       continue;
-               break;
-       }
-       if (lpni)
-               return lpni->lpni_nid;
-
-       /* Look for a routed-connected NID for this peer. */
-       lpni = NULL;
-       while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
-               if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
-                       continue;
-               break;
-       }
-       if (lpni)
-               return lpni->lpni_nid;
-
-       return LNET_NID_ANY;
-}
-
 /* Active side of ping. */
 static int lnet_peer_send_ping(struct lnet_peer *lp)
 __must_hold(&lp->lp_lock)
 {
-       lnet_nid_t pnid;
        int nnis;
        int rc;
        int cpt;
@@ -3334,12 +3329,11 @@ __must_hold(&lp->lp_lock)
        cpt = lnet_net_lock_current();
        /* Refcount for MD. */
        lnet_peer_addref_locked(lp);
-       pnid = lnet_peer_select_nid(lp);
        lnet_net_unlock(cpt);
 
        nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
 
-       rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
+       rc = lnet_send_ping(lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
                            the_lnet.ln_dc_handler, false);
 
        /*
@@ -3464,18 +3458,17 @@ __must_hold(&lp->lp_lock)
                CERROR("Can't bind push source MD: %d\n", rc);
                goto fail_error;
        }
+
        cpt = lnet_net_lock_current();
        /* Refcount for MD. */
        lnet_peer_addref_locked(lp);
        id.pid = LNET_PID_LUSTRE;
-       id.nid = lnet_peer_select_nid(lp);
+       if (lp->lp_disc_dst_nid != LNET_NID_ANY)
+               id.nid = lp->lp_disc_dst_nid;
+       else
+               id.nid = lp->lp_primary_nid;
        lnet_net_unlock(cpt);
 
-       if (id.nid == LNET_NID_ANY) {
-               rc = -EHOSTUNREACH;
-               goto fail_unlink;
-       }
-
        rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
                     LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
                     LNET_PROTO_PING_MATCHBITS, 0, 0);
@@ -3487,6 +3480,7 @@ __must_hold(&lp->lp_lock)
         * scratch
         */
        lp->lp_disc_src_nid = LNET_NID_ANY;
+       lp->lp_disc_dst_nid = LNET_NID_ANY;
 
        if (rc)
                goto fail_unlink;
@@ -4025,6 +4019,8 @@ int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
                  atomic_read(&lpni->lpni_hstats.hlt_remote_error);
                lpni_hstats->hlpni_health_value =
                  atomic_read(&lpni->lpni_healthv);
+               lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
+               lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
                if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
                        goto out_free_hstats;
                bulk += sizeof(*lpni_hstats);
@@ -4119,7 +4115,7 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                        lnet_net_unlock(LNET_LOCK_EX);
                        return;
                }
-               atomic_set(&lpni->lpni_healthv, value);
+               lnet_set_lpni_healthv_locked(lpni, value);
                lnet_peer_ni_add_to_recoveryq_locked(lpni,
                                             &the_lnet.ln_mt_peerNIRecovq, now);
                lnet_peer_ni_decref_locked(lpni);
@@ -4140,7 +4136,8 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                        list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
                                list_for_each_entry(lpni, &lpn->lpn_peer_nis,
                                                    lpni_peer_nis) {
-                                       atomic_set(&lpni->lpni_healthv, value);
+                                       lnet_set_lpni_healthv_locked(lpni,
+                                                                    value);
                                        lnet_peer_ni_add_to_recoveryq_locked(lpni,
                                             &the_lnet.ln_mt_peerNIRecovq, now);
                                }
diff --git a/lnet/utils/lnetconfig/liblnetconfig.c b/lnet/utils/lnetconfig/liblnetconfig.c
index e371771..dd59a33 100644
@@ -2250,6 +2250,14 @@ continue_without_udsp_info:
                                                hstats.hlni_local_error)
                                                        == NULL)
                                goto out;
+                       if (cYAML_create_number(yhstats, "ping_count",
+                                               hstats.hlni_ping_count)
+                                                       == NULL)
+                               goto out;
+                       if (cYAML_create_number(yhstats, "next_ping",
+                                               hstats.hlni_next_ping)
+                                                       == NULL)
+                               goto out;
 
 continue_without_msg_stats:
                        tunables = cYAML_create_object(item, "tunables");
@@ -3134,6 +3142,16 @@ continue_without_udsp_info:
                                                hstats->hlpni_network_timeout)
                                                        == NULL)
                                goto out;
+                       if (cYAML_create_number(yhstats, "ping_count",
+                                               hstats->hlpni_ping_count)
+                                                       == NULL)
+                               goto out;
+
+                       if (cYAML_create_number(yhstats, "next_ping",
+                                               hstats->hlpni_next_ping)
+                                                       == NULL)
+                               goto out;
+
                }
        }
 
diff --git a/lustre-iokit/stats-collect/iokit-gather-stats b/lustre-iokit/stats-collect/iokit-gather-stats
index 336d249..9dc1399 100755
@@ -31,7 +31,7 @@ debug () {
 }
 
 usage() {
-       printf $"Usage: iokit-gather-stats [-help] config_file [start|stop|cleanup] <log_name>\n"
+       printf $"Usage: iokit-gather-stats [--help|-h] config_file [start|stop|cleanup] <log_name>\n"
        if [ x$1 = x-h ]; then
                 printf $"
 The distribution script will run on a single node.  It is parameterised
diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index ee73a8c..4d7bda5 100644
@@ -5,7 +5,7 @@ TBD Whamcloud
        * Server primary kernels built and tested during release cycle:
          3.10.0-1062.18.1.el7 (RHEL7.7)
          3.10.0-1127.19.1.el7 (RHEL7.8)
-         3.10.0-1160.21.1.el7 (RHEL7.9)
+         3.10.0-1160.25.1.el7 (RHEL7.9)
        * Other server kernels known to build and work at some point (others may also work):
          3.10.0-862.14.4.el7  (RHEL7.5)
          3.10.0-957.27.2.el7  (RHEL7.6)
@@ -23,11 +23,11 @@ TBD Whamcloud
        * Client primary kernels built and tested during release cycle:
          3.10.0-1062.18.1.el7 (RHEL7.7)
          3.10.0-1127.19.1.el7 (RHEL7.8)
-         3.10.0-1160.21.1.el7 (RHEL7.9)
+         3.10.0-1160.25.1.el7 (RHEL7.9)
          4.12.14-95.48        (SLES12 SP4)
-         4.12.14-122.63       (SLES12 SP5)
+         4.12.14-122.66       (SLES12 SP5)
          4.12.14-197.75       (SLES15 SP1)
-         5.3.18-24.52         (SLES15 SP2)
+         5.3.18-24.61         (SLES15 SP2)
          4.15.0-48            (Ubuntu 18.04)
        * Other clients known to build on these kernels at some point (others may also work):
          3.10.0-862.14.4.el7  (RHEL7.5)
@@ -38,12 +38,13 @@ TBD Whamcloud
          4.18.0-147.8.1.el8   (RHEL8.1)
          4.18.0-193.28.1.el8  (RHEL8.2)
          4.18.0-240.22.1.el8  (RHEL8.3)
+         4.18.0-305.3.1.el8   (RHEL8.4)
          4.4.120-92.70        (SLES12 SP2)
          4.4.180-94.100       (SLES12 SP3)
          4.4.0-131            (Ubuntu 16.04)
          5.4.0-37             (Ubuntu 20.04)
          5.4.0                (vanilla kernel.org)
-       * Recommended e2fsprogs version: 1.45.6.wc5 or newer
+       * Recommended e2fsprogs version: 1.46.2.wc1 or newer
        * Recommended ZFS version: 2.0.0
        * NFS export disabled when stack size < 8192 (32-bit Lustre clients),
          since the NFSv4 export of Lustre filesystem with 4K stack may cause a
diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index 0c6643b..8598f26 100644
@@ -2953,6 +2953,7 @@ lustre/doc/Makefile
 lustre/include/Makefile
 lustre/include/lustre/Makefile
 lustre/include/uapi/linux/lustre/Makefile
+lustre/kernel_patches/targets/4.18-rhel8.4.target
 lustre/kernel_patches/targets/4.18-rhel8.3.target
 lustre/kernel_patches/targets/4.18-rhel8.2.target
 lustre/kernel_patches/targets/4.18-rhel8.1.target
diff --git a/lustre/doc/lfs-find.1 b/lustre/doc/lfs-find.1
index 7598f26..4360dd7 100644
@@ -26,6 +26,7 @@ lfs-find \- Lustre client utility to list files with specific attributes
 [[\fB!\fR] \fB--name\fR|\fB-n <\fIpattern\fR>]
       [[\fB!\fR] \fB--newer\fR[\fBXY\fR] <\fIreference\fR>]
       [[\fB!\fR] \fB--ost\fR|\fB-O\fR <\fIindex\fR,...>]
+      [[\fB!\fR] \fB--perm\fR [\fB/-\fR]<\fImode\fR> ]
 [[\fB!\fR] \fB--pool\fR <\fIpool\fR>]
 [\fB--print\fR|\fB-P\fR]
       [\fB--print0\fR|\fB-0\fR]
@@ -254,6 +255,16 @@ filesystem only once when migrating objects off multiple OSTs for evacuation
 and replacement using
 .BR lfs-migrate (1).
 .TP
+.BR "--perm \fImode\fR"
+File's permission are exactly \fImode\fR (octal or symbolic).
+.TP
+.BR "--perm /\fImode\fR"
+All of the permission bits \fImode\fR are set for the file.
+.TP
+.BR "--perm -\fImode\fR"
+Any of the permission bits \fImode\fR are set for the file. If no permission
+bits in \fImode\fR are set, this test matches any file.
+.TP
 .BR --pool
 Layout was created with the specified
 .I pool
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index cb38515..626b3ef 100644
@@ -1912,6 +1912,9 @@ struct cl_io {
                        loff_t                   ls_result;
                        int                      ls_whence;
                } ci_lseek;
+               struct cl_misc_io {
+                       time64_t                 lm_next_rpc_time;
+               } ci_misc;
         } u;
         struct cl_2queue     ci_queue;
         size_t               ci_nob;
@@ -2455,6 +2458,11 @@ static inline int cl_io_is_mkwrite(const struct cl_io *io)
        return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
 }
 
+static inline int cl_io_is_fault_writable(const struct cl_io *io)
+{
+       return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_writable;
+}
+
 /**
  * True, iff \a io is a truncate(2).
  */
diff --git a/lustre/include/llog_swab.h b/lustre/include/llog_swab.h
index 6fe62bc..cf48167 100644
@@ -60,7 +60,9 @@ void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
 void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
 void lustre_swab_llog_id(struct llog_logid *lid);
 void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+#ifdef HAVE_SERVER_SUPPORT
 void lustre_swab_update_ops(struct update_ops *uops, unsigned int op_count);
+#endif
 void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
 void lustre_swab_cfg_marker(struct cfg_marker *marker,
                            int swab, int size);
diff --git a/lustre/include/lprocfs_status.h b/lustre/include/lprocfs_status.h
index d0ab7e1..2b04a14 100644
@@ -318,6 +318,7 @@ static inline int opcode_offset(__u32 opc) {
                         OPC_RANGE(LDLM) +
                         OPC_RANGE(MDS) +
                         OPC_RANGE(OST));
+#ifdef HAVE_SERVER_SUPPORT
        } else if (opc < OUT_UPDATE_LAST_OPC) {
                /* update opcode */
                return (opc - OUT_UPDATE_FIRST_OPC +
@@ -345,25 +346,31 @@ static inline int opcode_offset(__u32 opc) {
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
+#endif /* HAVE_SERVER_SUPPORT */
        } else {
                /* Unknown Opcode */
                return -1;
        }
 }
 
+#define LUSTRE_MAX_OPCODES_CLIENT (OPC_RANGE(OST)  + \
+                                  OPC_RANGE(MDS)  + \
+                                  OPC_RANGE(LDLM) + \
+                                  OPC_RANGE(MGS)  + \
+                                  OPC_RANGE(OBD)  + \
+                                  OPC_RANGE(LLOG) + \
+                                  OPC_RANGE(SEC)  + \
+                                  OPC_RANGE(SEQ)  + \
+                                  OPC_RANGE(SEC)  + \
+                                  OPC_RANGE(FLD))
 
-#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST)  + \
-                            OPC_RANGE(MDS)  + \
-                            OPC_RANGE(LDLM) + \
-                            OPC_RANGE(MGS)  + \
-                            OPC_RANGE(OBD)  + \
-                            OPC_RANGE(LLOG) + \
-                            OPC_RANGE(SEC)  + \
-                            OPC_RANGE(SEQ)  + \
-                            OPC_RANGE(SEC)  + \
-                           OPC_RANGE(FLD)  + \
+#ifdef HAVE_SERVER_SUPPORT
+#define LUSTRE_MAX_OPCODES (LUSTRE_MAX_OPCODES_CLIENT + \
                            OPC_RANGE(OUT_UPDATE) + \
                            OPC_RANGE(LFSCK))
+#else
+#define LUSTRE_MAX_OPCODES LUSTRE_MAX_OPCODES_CLIENT
+#endif
 
 #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR)  + \
                             OPC_RANGE(EXTRA))
diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h
index 884d79e..4081923 100644
@@ -1528,12 +1528,12 @@ struct lu_tgt_pool {
        struct rw_semaphore op_rw_sem;  /* to protect lu_tgt_pool use */
 };
 
-int tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
-int tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
-int tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
-int tgt_pool_free(struct lu_tgt_pool *op);
-int tgt_check_index(int idx, struct lu_tgt_pool *osts);
-int tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
+int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
+int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
+int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
+int lu_tgt_pool_free(struct lu_tgt_pool *op);
+int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
+int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
 
 /* bitflags used in rr / qos allocation */
 enum lq_flag {
@@ -1588,6 +1588,12 @@ struct lu_tgt_qos {
 };
 
 /* target descriptor */
+#define LOV_QOS_DEF_THRESHOLD_RR_PCT   17
+#define LMV_QOS_DEF_THRESHOLD_RR_PCT   5
+
+#define LOV_QOS_DEF_PRIO_FREE          90
+#define LMV_QOS_DEF_PRIO_FREE          90
+
 struct lu_tgt_desc {
        union {
                struct dt_device        *ltd_tgt;
diff --git a/lustre/include/lustre/lustreapi.h b/lustre/include/lustre/lustreapi.h
index 4d7c299..a938900 100644
@@ -166,6 +166,8 @@ int llapi_file_open_pool(const char *name, int flags, int mode,
                         unsigned long long stripe_size, int stripe_offset,
                         int stripe_count, int stripe_pattern, char *pool_name);
 int llapi_poollist(const char *name);
+int llapi_get_poolbuf(const char *name, char **buf,
+                     char ***poolist, int *poolcount);
 int llapi_get_poollist(const char *name, char **poollist, int list_size,
                       char *buffer, int buffer_size);
 int llapi_get_poolmembers(const char *poolname, char **members, int list_size,
@@ -221,12 +223,20 @@ enum {
        NEWERXY_MAX,
 };
 
+enum lfs_find_perm {
+       LFS_FIND_PERM_EXACT = -2,
+       LFS_FIND_PERM_ANY   = -1,
+       LFS_FIND_PERM_OFF   =  0,
+       LFS_FIND_PERM_ALL   =  1,
+};
+
 struct find_param {
        unsigned int             fp_max_depth;
        dev_t                    fp_dev;
        mode_t                   fp_type; /* S_IFIFO,... */
        uid_t                    fp_uid;
        gid_t                    fp_gid;
+       mode_t                   fp_perm;
        time_t                   fp_atime;
        time_t                   fp_mtime;
        time_t                   fp_ctime;
@@ -248,7 +258,7 @@ struct find_param {
                                 fp_mdt_count_sign:2,
                                 fp_blocks_sign:2,
                                 fp_ext_size_sign:2,
-                                fp_unused1_sign:2, /* Fields available to use*/
+                                fp_perm_sign:2,
                                 fp_unused2_sign:2, /* Once used we must add  */
                                 fp_unused3_sign:2, /* a separate flag field  */
                                 fp_unused4_sign:2; /* at end of the struct.  */
@@ -314,12 +324,11 @@ struct find_param {
                                 fp_lazy:1,
                                 fp_newerxy:1,
                                 fp_exclude_btime:1,
-                                fp_unused_bit3:1, /* All of these unused bit */
-                                fp_unused_bit4:1, /* fields available to use.*/
-                                fp_unused_bit5:1, /* Once all unused fields  */
-                                fp_unused_bit6:1, /* are used we need to add */
-                                fp_unused_bit7:1; /* a separate flag field at*/
-                                                  /* the end of the struct.  */
+                                fp_exclude_perm:1,
+                                fp_unused_bit4:1, /* Once all unused fields  */
+                                fp_unused_bit5:1, /* are used we need to add */
+                                fp_unused_bit6:1, /* a separate flag field at*/
+                                fp_unused_bit7:1; /* the end of the struct.  */
 
        enum llapi_layout_verbose fp_verbose;
        int                      fp_quiet;
@@ -837,6 +846,10 @@ int llapi_layout_stripe_count_get(const struct llapi_layout *layout,
  */
 int llapi_layout_stripe_count_set(struct llapi_layout *layout, uint64_t count);
 
+/**
+ * Check if the stripe count \a stripe_count \a is valid.
+ */
+bool llapi_layout_stripe_count_is_valid(int64_t stripe_count);
 /******************** Stripe Size ********************/
 
 /**
@@ -968,7 +981,7 @@ int llapi_layout_pool_name_get(const struct llapi_layout *layout,
  * \retval -1  Invalid argument, errno set to EINVAL.
  */
 int llapi_layout_pool_name_set(struct llapi_layout *layout,
-                             const char *pool_name);
+                              char *pool_name);
 
 /******************** File Creation ********************/
 
@@ -1030,7 +1043,7 @@ int llapi_layout_file_create(const char *path, int open_flags, int mode,
 int llapi_layout_flags_set(struct llapi_layout *layout, uint32_t flags);
 int llapi_layout_flags_get(struct llapi_layout *layout, uint32_t *flags);
 const char *llapi_layout_flags_string(uint32_t flags);
-const __u16 llapi_layout_string_flags(char *string);
+__u16 llapi_layout_string_flags(char *string);
 
 /**
  * llapi_layout_mirror_count_get() - Get mirror count from the header of
@@ -1189,7 +1202,8 @@ int llapi_mirror_punch(int fd, unsigned int id, off_t start, size_t length);
 int llapi_heat_get(int fd, struct lu_heat *heat);
 int llapi_heat_set(int fd, __u64 flags);
 
-int llapi_layout_sanity(struct llapi_layout *layout, bool incomplete, bool flr);
+int llapi_layout_sanity(struct llapi_layout *layout, const char *fname,
+                       bool incomplete, bool flr);
 void llapi_layout_sanity_perror(int error);
 int llapi_layout_dom_size(struct llapi_layout *layout, uint64_t *size);
 
diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index b5a704b..ac80bde 100644
@@ -42,6 +42,7 @@
 #include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
+#include <linux/security.h>
 
 #include <libcfs/linux/linux-fs.h>
 #include <obd_support.h>
@@ -594,4 +595,16 @@ static inline bool is_root_inode(struct inode *inode)
 #define register_shrinker(_s) (register_shrinker(_s), 0)
 #endif
 
+static inline void ll_security_release_secctx(char *secdata, u32 seclen)
+{
+#ifdef HAVE_SEC_RELEASE_SECCTX_1ARG
+       struct lsmcontext context = { };
+
+       lsmcontext_init(&context, secdata, seclen, 0);
+       return security_release_secctx(&context);
+#else
+       return security_release_secctx(secdata, seclen);
+#endif
+}
+
 #endif /* _LUSTRE_COMPAT_H */
diff --git a/lustre/include/lustre_disk.h b/lustre/include/lustre_disk.h
index e87d7ee..b7dc9d0 100644
@@ -362,8 +362,10 @@ int lustre_put_lsi(struct super_block *sb);
 int lmd_parse(char *options, struct lustre_mount_data *lmd);
 
 /* mgc_request.c */
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
-int mgc_logname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
+int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id,
+                    enum mgs_cfg_type type);
+int mgc_logname2resid(char *fsname, struct ldlm_res_id *res_id,
+                     enum mgs_cfg_type type);
 
 /** @} disk */
 
diff --git a/lustre/include/lustre_fid.h b/lustre/include/lustre_fid.h
index fccb5ff..e82b847 100644
@@ -306,6 +306,13 @@ static inline int fid_is_name_llog(const struct lu_fid *fid)
        return fid_seq(fid) == FID_SEQ_LLOG_NAME;
 }
 
+static inline int fid_seq_in_fldb(u64 seq)
+{
+       return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
+              fid_seq_is_root(seq) || fid_seq_is_dot(seq);
+}
+
+#ifdef HAVE_SERVER_SUPPORT
 static inline int fid_is_namespace_visible(const struct lu_fid *fid)
 {
        const __u64 seq = fid_seq(fid);
@@ -317,12 +324,6 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid)
               fid_is_root(fid) || fid_seq_is_dot(seq);
 }
 
-static inline int fid_seq_in_fldb(__u64 seq)
-{
-       return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
-              fid_seq_is_root(seq) || fid_seq_is_dot(seq);
-}
-
 static inline void ost_layout_cpu_to_le(struct ost_layout *dst,
                                        const struct ost_layout *src)
 {
@@ -374,6 +375,7 @@ static inline void filter_fid_le_to_cpu(struct filter_fid *dst,
 
        /* XXX: Add more if filter_fid is enlarged in the future. */
 }
+#endif /* HAVE_SERVER_SUPPORT */
 
 static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
 {
diff --git a/lustre/include/lustre_linkea.h b/lustre/include/lustre_linkea.h
index 3bf6e2b..422a17c 100644
@@ -61,8 +61,9 @@ void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
 int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname,
                      const struct lu_fid *pfid);
 int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
-                  const struct lu_fid *pfid);
-void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname);
+                  const struct lu_fid *pfid, bool err_on_overflow);
+void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname,
+                   bool is_encrypted);
 int linkea_links_new(struct linkea_data *ldata, struct lu_buf *buf,
                     const struct lu_name *cname, const struct lu_fid *pfid);
 int linkea_overflow_shrink(struct linkea_data *ldata);
diff --git a/lustre/include/lustre_lmv.h b/lustre/include/lustre_lmv.h
index 811da36..6f302b3 100644
@@ -116,16 +116,20 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
 
 static inline void lsm_md_dump(int mask, const struct lmv_stripe_md *lsm)
 {
+       bool valid_hash = lmv_dir_bad_hash(lsm);
        int i;
 
        /* If lsm_md_magic == LMV_MAGIC_FOREIGN pool_name may not be a null
         * terminated string so only print LOV_MAXPOOLNAME bytes.
         */
        CDEBUG(mask,
-              "magic %#x stripe count %d master mdt %d hash type %#x max inherit %hhu version %d migrate offset %d migrate hash %#x pool %.*s\n",
+              "magic %#x stripe count %d master mdt %d hash type %s:%#x max inherit %hhu version %d migrate offset %d migrate hash %#x pool %.*s\n",
               lsm->lsm_md_magic, lsm->lsm_md_stripe_count,
-              lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
-              lsm->lsm_md_max_inherit, lsm->lsm_md_layout_version,
+              lsm->lsm_md_master_mdt_index,
+              valid_hash ? "invalid hash" :
+                           mdt_hash_name[lsm->lsm_md_hash_type & (LMV_HASH_TYPE_MAX - 1)],
+              lsm->lsm_md_hash_type, lsm->lsm_md_max_inherit,
+              lsm->lsm_md_layout_version,
               lsm->lsm_md_migrate_offset, lsm->lsm_md_migrate_hash,
               LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);
 
@@ -403,6 +407,17 @@ static inline bool lmv_user_magic_supported(__u32 lum_magic)
               lum_magic == LMV_MAGIC_FOREIGN;
 }
 
+#define LMV_DEBUG(mask, lmv, msg)                                      \
+       CDEBUG(mask,                                                    \
+              "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate offset=%u migrate hash=%s:%u.\n",\
+              msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,          \
+              (lmv)->lmv_master_mdt_index,                             \
+              mdt_hash_name[(lmv)->lmv_hash_type & (LMV_HASH_TYPE_MAX - 1)],\
+              (lmv)->lmv_hash_type, (lmv)->lmv_layout_version,         \
+              (lmv)->lmv_migrate_offset,                               \
+              mdt_hash_name[(lmv)->lmv_migrate_hash & (LMV_HASH_TYPE_MAX - 1)],\
+              (lmv)->lmv_migrate_hash)
+
 /* master LMV is sane */
 static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
 {
diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index 466e484..ed068b4 100644
@@ -943,6 +943,10 @@ struct ptlrpc_srv_req {
 #define rq_user_desc           rq_srv.sr_user_desc
 #define rq_ops                 rq_srv.sr_ops
 #define rq_rqbd                        rq_srv.sr_rqbd
+#define rq_reqmsg              rq_pill.rc_reqmsg
+#define rq_repmsg              rq_pill.rc_repmsg
+#define rq_req_swab_mask       rq_pill.rc_req_swab_mask
+#define rq_rep_swab_mask       rq_pill.rc_rep_swab_mask
 
 /**
  * Represents remote procedure call.
@@ -1029,16 +1033,14 @@ struct ptlrpc_request {
         int rq_replen;
        /** Pool if request is from preallocated list */
        struct ptlrpc_request_pool      *rq_pool;
-       /** Request message - what client sent */
-       struct lustre_msg *rq_reqmsg;
-        /** Reply message - server response */
-        struct lustre_msg *rq_repmsg;
         /** Transaction number */
         __u64 rq_transno;
         /** xid */
         __u64                           rq_xid;
        /** bulk match bits */
        __u64                            rq_mbits;
+       /** reply match bits */
+       __u64                            rq_rep_mbits;
        /**
         * List item to for replay list. Not yet committed requests get linked
         * there.
@@ -1101,10 +1103,6 @@ struct ptlrpc_request {
        unsigned int                     rq_reply_off;
        /** @} */
 
-       /** Fields that help to see if request and reply were swabbed or not */
-       __u32                            rq_req_swab_mask;
-       __u32                            rq_rep_swab_mask;
-
        /** how many early replies (for stats) */
        int                              rq_early_count;
        /** Server-side, export on which request was received */
@@ -1184,62 +1182,6 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
 /** @} nrs */
 
 /**
- * Returns true if request buffer at offset \a index was already swabbed
- */
-static inline bool lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
-{
-       LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
-       return req->rq_req_swab_mask & BIT(index);
-}
-
-/**
- * Returns true if request reply buffer at offset \a index was already swabbed
- */
-static inline bool lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
-{
-       LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
-       return req->rq_rep_swab_mask & BIT(index);
-}
-
-/**
- * Returns true if request needs to be swabbed into local cpu byteorder
- */
-static inline bool ptlrpc_req_need_swab(struct ptlrpc_request *req)
-{
-       return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Returns true if request reply needs to be swabbed into local cpu byteorder
- */
-static inline bool ptlrpc_rep_need_swab(struct ptlrpc_request *req)
-{
-       return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Mark request buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
-                                         size_t index)
-{
-       LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
-       LASSERT((req->rq_req_swab_mask & BIT(index)) == 0);
-       req->rq_req_swab_mask |= BIT(index);
-}
-
-/**
- * Mark request reply buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
-                                         size_t index)
-{
-       LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
-       LASSERT((req->rq_rep_swab_mask & BIT(index)) == 0);
-       req->rq_rep_swab_mask |= BIT(index);
-}
-
-/**
  * Convert numerical request phase value \a phase into text string description
  */
 static inline const char *
@@ -2306,10 +2248,6 @@ int ptlrpc_reconnect_import(struct obd_import *imp);
                                 MDS_REG_MAXREQSIZE : OUT_MAXREQSIZE)
 #define PTLRPC_MAX_BUFLEN      (OST_IO_MAXREQSIZE > MD_MAX_BUFLEN ? \
                                 OST_IO_MAXREQSIZE : MD_MAX_BUFLEN)
-bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
-                         __u32 index);
-void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
-                           __u32 index);
 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
 
diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index 9bcb313..c4de03a 100644
@@ -621,12 +621,12 @@ static inline void osc_io_unplug(const struct lu_env *env,
 }
 
 typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
-                                 struct osc_page *, void *);
+                                 void**, int, void *);
 bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
                          struct osc_object *osc, pgoff_t start, pgoff_t end,
                          osc_page_gang_cbt cb, void *cbdata);
 bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
-                   struct osc_page *ops, void *cbdata);
+                   void**, int, void *cbdata);
 
 /* osc_dev.c */
 int osc_device_init(const struct lu_env *env, struct lu_device *d,
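
With this change the gang-lookup callback receives a batch, an array of page pointers plus a count, instead of one osc_page per invocation. A minimal sketch of a conforming callback follows; the demo_ name and the per-page body are assumptions, and the true-means-continue return convention is inferred from the bool callback type rather than stated in this hunk:

    static bool demo_gang_cb(const struct lu_env *env, struct cl_io *io,
                             void **pvec, int count, void *cbdata)
    {
            int i;

            for (i = 0; i < count; i++) {
                    struct osc_page *ops = pvec[i];

                    (void)ops;      /* per-page work would go here */
            }
            return true;            /* assumed: true keeps the lookup going */
    }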
index 38fed78..57c74aa 100644 (file)
@@ -62,9 +62,16 @@ enum req_location {
 
 struct req_capsule {
         struct ptlrpc_request   *rc_req;
-        const struct req_format *rc_fmt;
-        enum req_location        rc_loc;
-        __u32                    rc_area[RCL_NR][REQ_MAX_FIELD_NR];
+       /** Request message - what client sent */
+       struct lustre_msg       *rc_reqmsg;
+       /** Reply message - server response */
+       struct lustre_msg       *rc_repmsg;
+       /** Fields that help to see if request and reply were swabbed or not */
+       __u32                    rc_req_swab_mask;
+       __u32                    rc_rep_swab_mask;
+       const struct req_format *rc_fmt;
+       enum req_location        rc_loc;
+       __u32                    rc_area[RCL_NR][REQ_MAX_FIELD_NR];
 };
 
 void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req,
@@ -125,6 +132,69 @@ void req_capsule_shrink(struct req_capsule *pill,
 int req_capsule_server_grow(struct req_capsule *pill,
                            const struct req_msg_field *field,
                            __u32 newlen);
+bool req_capsule_need_swab(struct req_capsule *pill, enum req_location loc,
+                          __u32 index);
+void req_capsule_set_swabbed(struct req_capsule *pill, enum req_location loc,
+                            __u32 index);
+
+/**
+ * Returns true if request buffer at offset \a index was already swabbed
+ */
+static inline bool req_capsule_req_swabbed(struct req_capsule *pill,
+                                          size_t index)
+{
+       LASSERT(index < sizeof(pill->rc_req_swab_mask) * 8);
+       return pill->rc_req_swab_mask & BIT(index);
+}
+
+/**
+ * Returns true if request reply buffer at offset \a index was already swabbed
+ */
+static inline bool req_capsule_rep_swabbed(struct req_capsule *pill,
+                                          size_t index)
+{
+       LASSERT(index < sizeof(pill->rc_rep_swab_mask) * 8);
+       return pill->rc_rep_swab_mask & BIT(index);
+}
+
+/**
+ * Returns true if request needs to be swabbed into local cpu byteorder
+ */
+static inline bool req_capsule_req_need_swab(struct req_capsule *pill)
+{
+       return req_capsule_req_swabbed(pill, MSG_PTLRPC_HEADER_OFF);
+}
+
+/**
+ * Returns true if request reply needs to be swabbed into local cpu byteorder
+ */
+static inline bool req_capsule_rep_need_swab(struct req_capsule *pill)
+{
+       return req_capsule_rep_swabbed(pill, MSG_PTLRPC_HEADER_OFF);
+}
+
+/**
+ * Mark request buffer at offset \a index that it was already swabbed
+ */
+static inline void req_capsule_set_req_swabbed(struct req_capsule *pill,
+                                              size_t index)
+{
+       LASSERT(index < sizeof(pill->rc_req_swab_mask) * 8);
+       LASSERT((pill->rc_req_swab_mask & BIT(index)) == 0);
+       pill->rc_req_swab_mask |= BIT(index);
+}
+
+/**
+ * Mark request reply buffer at offset \a index that it was already swabbed
+ */
+static inline void req_capsule_set_rep_swabbed(struct req_capsule *pill,
+                                              size_t index)
+{
+       LASSERT(index < sizeof(pill->rc_rep_swab_mask) * 8);
+       LASSERT((pill->rc_rep_swab_mask & BIT(index)) == 0);
+       pill->rc_rep_swab_mask |= BIT(index);
+}
+
 int  req_layout_init(void);
 void req_layout_fini(void);
 #ifdef HAVE_SERVER_SUPPORT
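
Taken together, these helpers let unpacking code track byte order through the pill alone. A hedged sketch of the usual pattern; demo_swab_reply_field and the elided swab call are illustrative, not from the patch:

    static void demo_swab_reply_field(struct req_capsule *pill, __u32 index)
    {
            /* The reply needs swabbing only when its header arrived in a
             * foreign byte order; each buffer is swabbed at most once and
             * then marked, so the LASSERTs above can catch repeats. */
            if (req_capsule_rep_need_swab(pill) &&
                !req_capsule_rep_swabbed(pill, index)) {
                    /* ... byte-swap the buffer at 'index' here ... */
                    req_capsule_set_rep_swabbed(pill, index);
            }
    }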
index 7db43f6..2e9d9f5 100644 (file)
 
 #include <uapi/linux/lustre/lustre_idl.h>
 
+#ifdef HAVE_SERVER_SUPPORT
 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent);
 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent);
 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent);
+void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc);
+void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc);
+void lustre_swab_object_update(struct object_update *ou);
+int lustre_swab_object_update_request(struct object_update_request *our,
+                                     __u32 len);
+void lustre_swab_out_update_header(struct out_update_header *ouh);
+void lustre_swab_out_update_buffer(struct out_update_buffer *oub);
+void lustre_swab_object_update_result(struct object_update_result *our);
+int lustre_swab_object_update_reply(struct object_update_reply *our, __u32 len);
+#endif /* HAVE_SERVER_SUPPORT */
 void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
 void lustre_swab_connect(struct obd_connect_data *ocd);
 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
@@ -76,8 +87,6 @@ void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
 void lustre_swab_lov_desc(struct lov_desc *ld);
 void lustre_swab_ldlm_res_id(struct ldlm_res_id *id);
 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d);
-void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *);
-void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *);
 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r);
 void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
@@ -111,13 +120,6 @@ void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
 void lustre_swab_hsm_request(struct hsm_request *hr);
-void lustre_swab_object_update(struct object_update *ou);
-int lustre_swab_object_update_request(struct object_update_request *our,
-                                     __u32 len);
-void lustre_swab_out_update_header(struct out_update_header *ouh);
-void lustre_swab_out_update_buffer(struct out_update_buffer *oub);
-void lustre_swab_object_update_result(struct object_update_result *our);
-int lustre_swab_object_update_reply(struct object_update_reply *our, __u32 len);
 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
 void lustre_swab_close_data(struct close_data *data);
 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync);
index 2d762b8..abab785 100644 (file)
@@ -173,8 +173,6 @@ struct md_op_spec {
                     sp_permitted:1, /* do not check permission */
                     sp_migrate_close:1, /* close the file during migrate */
                     sp_migrate_nsonly:1; /* migrate dirent only */
-       /** Current lock mode for parent dir where create is performing. */
-       mdl_mode_t sp_cr_mode;
 
        /** to create directory */
        const struct dt_index_features *sp_feat;
index ed93ccf..e4daa4c 100644 (file)
@@ -45,6 +45,7 @@
 #ifdef HAVE_SERVER_SUPPORT
 # include <lu_target.h>
 # include <obd_target.h>
+# include <lustre_quota.h>
 #endif
 #include <lu_ref.h>
 #include <lustre_export.h>
@@ -53,7 +54,6 @@
 #include <lustre_handles.h>
 #include <lustre_intent.h>
 #include <lvfs.h>
-#include <lustre_quota.h>
 
 #define MAX_OBD_DEVICES 8192
 
@@ -1175,7 +1175,7 @@ struct md_ops {
 
        int (*m_init_ea_size)(struct obd_export *, __u32, __u32);
 
-       int (*m_get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
+       int (*m_get_lustre_md)(struct obd_export *, struct req_capsule *,
                               struct obd_export *, struct obd_export *,
                               struct lustre_md *);
 
index b1fc854..093682f 100644 (file)
@@ -261,7 +261,7 @@ struct config_llog_data {
        struct config_llog_data    *cld_barrier;/* barrier log (for MDT only) */
        struct obd_export          *cld_mgcexp;
        struct mutex                cld_lock;
-       int                         cld_type;
+       enum mgs_cfg_type           cld_type;
        unsigned int                cld_stopping:1, /* we were told to stop
                                                     * watching */
                                    cld_lostlock:1; /* lock not requeued */
@@ -1046,13 +1046,14 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
                             struct obd_statfs *osfs, time64_t max_age,
                             __u32 flags)
 {
-       struct obd_device *obd = exp->exp_obd;
+       struct obd_device *obd;
        int rc = 0;
 
        ENTRY;
-       if (unlikely(obd == NULL))
+       if (unlikely(exp == NULL || exp->exp_obd == NULL))
                RETURN(-EINVAL);
 
+       obd = exp->exp_obd;
        OBD_CHECK_DEV_ACTIVE(obd);
 
        if (unlikely(!obd->obd_type || !obd->obd_type->typ_dt_ops->o_statfs)) {
@@ -1570,10 +1571,10 @@ static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data,
 }
 
 static inline int md_get_lustre_md(struct obd_export *exp,
-                                   struct ptlrpc_request *req,
-                                   struct obd_export *dt_exp,
-                                   struct obd_export *md_exp,
-                                   struct lustre_md *md)
+                                  struct req_capsule *pill,
+                                  struct obd_export *dt_exp,
+                                  struct obd_export *md_exp,
+                                  struct lustre_md *md)
 {
        int rc;
 
@@ -1581,7 +1582,7 @@ static inline int md_get_lustre_md(struct obd_export *exp,
        if (rc)
                return rc;
 
-       return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md);
+       return MDP(exp->exp_obd, get_lustre_md)(exp, pill, dt_exp, md_exp, md);
 }
 
 static inline int md_free_lustre_md(struct obd_export *exp,
@@ -1846,6 +1847,21 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid);
 /* class_obd.c */
 extern char obd_jobid_name[];
 
+extern unsigned int obd_lbug_on_eviction;
+extern unsigned int obd_dump_on_eviction;
+
+static inline bool do_dump_on_eviction(struct obd_device *exp_obd)
+{
+       if (obd_lbug_on_eviction &&
+           strncmp(exp_obd->obd_type->typ_name, LUSTRE_MGC_NAME,
+                   strlen(LUSTRE_MGC_NAME))) {
+               CERROR("LBUG upon eviction\n");
+               LBUG();
+       }
+
+       return obd_dump_on_eviction;
+}
+
 /* statfs_pack.c */
 struct kstatfs;
 void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
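
In do_dump_on_eviction() the strncmp() is non-zero exactly when the device type is not "mgc", so the forced LBUG is skipped for MGC devices; a plausible reading is that MGC evictions are routine and should not crash the node. An equivalent, more explicit spelling (illustrative only; the helper above is authoritative):

    static inline bool demo_do_dump_on_eviction(struct obd_device *obd)
    {
            bool is_mgc = strncmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME,
                                  strlen(LUSTRE_MGC_NAME)) == 0;

            if (obd_lbug_on_eviction && !is_mgc) {
                    CERROR("LBUG upon eviction\n");
                    LBUG();
            }
            return obd_dump_on_eviction != 0;
    }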
index 9dde5f7..29dafee 100644 (file)
@@ -245,6 +245,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_MDS_REINT_OPEN                 0x169
 #define OBD_FAIL_MDS_REINT_OPEN2        0x16a
 #define OBD_FAIL_MDS_COMMITRW_DELAY     0x16b
+#define OBD_FAIL_MDS_CHANGELOG_DEL      0x16c
 
 /* layout lock */
 #define OBD_FAIL_MDS_NO_LL_GETATTR      0x170
@@ -457,6 +458,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_PTLRPC_CONNECT_RACE    0x531
 #define OBD_FAIL_NET_ERROR_RPC          0x532
 #define OBD_FAIL_PTLRPC_IDLE_RACE       0x533
+#define OBD_FAIL_PTLRPC_ENQ_RESEND      0x534
 
 #define OBD_FAIL_OBD_PING_NET            0x600
 /*     OBD_FAIL_OBD_LOG_CANCEL_NET      0x601 obsolete since 1.5 */
@@ -595,6 +597,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_LLITE_SHORT_COMMIT                0x1415
 #define OBD_FAIL_LLITE_CREATE_FILE_PAUSE2          0x1416
 #define OBD_FAIL_LLITE_RACE_MOUNT                  0x1417
+#define OBD_FAIL_LLITE_PAGE_ALLOC                  0x1418
 
 #define OBD_FAIL_FID_INDIR     0x1501
 #define OBD_FAIL_FID_INLMA     0x1502
@@ -973,6 +976,7 @@ do {                                                                          \
 #define KEY_IS(str) \
         (keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
 
+#ifdef HAVE_SERVER_SUPPORT
 /* LUSTRE_LMA_FL_MASKS defines which flags will be stored in LMA */
 
 static inline int lma_to_lustre_flags(__u32 lma_flags)
@@ -986,6 +990,7 @@ static inline int lustre_to_lma_flags(__u32 la_flags)
        return (((la_flags & LUSTRE_ORPHAN_FL) ? LMAI_ORPHAN : 0) |
                ((la_flags & LUSTRE_ENCRYPT_FL) ? LMAI_ENCRYPT : 0));
 }
+#endif /* HAVE_SERVER_SUPPORT */
 
 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
  * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
index 1218328..de854ee 100644 (file)
@@ -850,6 +850,7 @@ struct ptlrpc_body_v2 {
 #define OBD_CONNECT2_GETATTR_PFID      0x20000ULL /* pack parent FID in getattr */
 #define OBD_CONNECT2_LSEEK            0x40000ULL /* SEEK_HOLE/DATA RPC */
 #define OBD_CONNECT2_DOM_LVB          0x80000ULL /* pack DOM glimpse data in LVB */
+#define OBD_CONNECT2_REP_MBITS         0x100000ULL /* match reply by mbits, not xid */
 /* XXX README XXX:
  * Please DO NOT add flag values here before first ensuring that this same
  * flag value is not in use on some other branch.  Please clear any such
@@ -909,7 +910,8 @@ struct ptlrpc_body_v2 {
                                OBD_CONNECT2_CRUSH | \
                                OBD_CONNECT2_ENCRYPT | \
                                OBD_CONNECT2_GETATTR_PFID |\
-                               OBD_CONNECT2_LSEEK | OBD_CONNECT2_DOM_LVB)
+                               OBD_CONNECT2_LSEEK | OBD_CONNECT2_DOM_LVB |\
+                               OBD_CONNECT2_REP_MBITS)
 
 #define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
                                OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
@@ -931,17 +933,19 @@ struct ptlrpc_body_v2 {
                                OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2)
 
 #define OST_CONNECT_SUPPORTED2 (OBD_CONNECT2_LOCKAHEAD | OBD_CONNECT2_INC_XID |\
-                               OBD_CONNECT2_ENCRYPT | OBD_CONNECT2_LSEEK)
+                               OBD_CONNECT2_ENCRYPT | OBD_CONNECT2_LSEEK |\
+                               OBD_CONNECT2_REP_MBITS)
 
-#define ECHO_CONNECT_SUPPORTED (OBD_CONNECT_FID)
-#define ECHO_CONNECT_SUPPORTED2 0
+#define ECHO_CONNECT_SUPPORTED (OBD_CONNECT_FID | OBD_CONNECT_FLAGS2)
+#define ECHO_CONNECT_SUPPORTED2 OBD_CONNECT2_REP_MBITS
 
 #define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
                                OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
                                OBD_CONNECT_PINGLESS |\
-                               OBD_CONNECT_BULK_MBITS | OBD_CONNECT_BARRIER)
+                               OBD_CONNECT_BULK_MBITS | OBD_CONNECT_BARRIER | \
+                               OBD_CONNECT_FLAGS2)
 
-#define MGS_CONNECT_SUPPORTED2 0
+#define MGS_CONNECT_SUPPORTED2 OBD_CONNECT2_REP_MBITS
 
 /* Features required for this version of the client to work with server */
 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_FID |     \
@@ -2214,14 +2218,6 @@ struct lmv_mds_md_v1 {
        struct lu_fid lmv_stripe_fids[0];       /* FIDs for each stripe */
 };
 
-#define LMV_DEBUG(mask, lmv, msg)                                      \
-       CDEBUG(mask,                                                    \
-              "%s LMV: magic=%#x count=%u index=%u hash=%#x version=%u migrate offset=%u migrate hash=%u.\n",  \
-              msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,          \
-              (lmv)->lmv_master_mdt_index, (lmv)->lmv_hash_type,       \
-              (lmv)->lmv_layout_version, (lmv)->lmv_migrate_offset,    \
-              (lmv)->lmv_migrate_hash)
-
 /* stripe count before directory split */
 #define lmv_split_offset       lmv_migrate_offset
 /* stripe count after directory merge */
@@ -2613,20 +2609,20 @@ struct mgs_nidtbl_entry {
         } u;
 };
 
-enum {
-       CONFIG_T_CONFIG  = 0,
-       CONFIG_T_SPTLRPC = 1,
-       CONFIG_T_RECOVER = 2,
-       CONFIG_T_PARAMS  = 3,
-       CONFIG_T_NODEMAP = 4,
-       CONFIG_T_BARRIER = 5,
-       CONFIG_T_MAX
+enum mgs_cfg_type {
+       MGS_CFG_T_CONFIG        = 0,
+       MGS_CFG_T_SPTLRPC       = 1,
+       MGS_CFG_T_RECOVER       = 2,
+       MGS_CFG_T_PARAMS        = 3,
+       MGS_CFG_T_NODEMAP       = 4,
+       MGS_CFG_T_BARRIER       = 5,
+       MGS_CFG_T_MAX
 };
 
 struct mgs_config_body {
        char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
        __u64    mcb_offset;    /* next index of config log to request */
-       __u16    mcb_type;      /* type of log: CONFIG_T_[CONFIG|RECOVER] */
+       __u16    mcb_type;      /* type of log: MGS_CFG_T_[CONFIG|RECOVER] */
        __u8     mcb_nm_cur_pass;
        __u8     mcb_bits;      /* bits unit size of config log */
        __u32    mcb_units;     /* # of units for bulk transfer */
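
OBD_CONNECT2_REP_MBITS advertises matching replies by matchbits rather than by XID, and the *_CONNECT_SUPPORTED2 updates above let each target type negotiate it. A hedged sketch of a feature test, using the existing exp_connect_flags2() accessor (which already yields 0 when OBD_CONNECT_FLAGS2 was not negotiated); the demo_ wrapper is not part of the patch:

    static inline bool demo_reply_uses_mbits(struct obd_export *exp)
    {
            return (exp_connect_flags2(exp) & OBD_CONNECT2_REP_MBITS) != 0;
    }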
index 308a526..5bd1693 100644 (file)
@@ -718,12 +718,6 @@ struct fsxattr {
 #define LOV_OFFSET_DEFAULT      ((__u16)-1)
 #define LMV_OFFSET_DEFAULT      ((__u32)-1)
 
-#define LOV_QOS_DEF_THRESHOLD_RR_PCT   17
-#define LMV_QOS_DEF_THRESHOLD_RR_PCT    5
-
-#define LOV_QOS_DEF_PRIO_FREE          90
-#define LMV_QOS_DEF_PRIO_FREE          90
-
 static inline bool lov_pattern_supported(__u32 pattern)
 {
        return (pattern & ~LOV_PATTERN_F_RELEASED) == LOV_PATTERN_RAID0 ||
@@ -1010,7 +1004,7 @@ static __attribute__((unused)) const char *mdt_hash_name[] = {
        "crush",
 };
 
-#define LMV_HASH_TYPE_DEFAULT LMV_HASH_TYPE_FNV_1A_64
+#define LMV_HASH_TYPE_DEFAULT LMV_HASH_TYPE_CRUSH
 
 /* Right now only the lower part (0-16 bits) of lmv_hash_type is being used,
  * and the higher part will be the flag to indicate the status of object,
@@ -1280,6 +1274,8 @@ static inline __u64 lustre_stoqb(__kernel_size_t space)
 #define LUSTRE_Q_SETQUOTAPOOL  0x800010  /* set user pool quota */
 #define LUSTRE_Q_GETINFOPOOL   0x800011  /* get pool quota info */
 #define LUSTRE_Q_SETINFOPOOL   0x800012  /* set pool quota info */
+#define LUSTRE_Q_GETDEFAULT_POOL       0x800013  /* get default pool quota*/
+#define LUSTRE_Q_SETDEFAULT_POOL       0x800014  /* set default pool quota */
 /* In the current Lustre implementation, the grace time is either the time
  * or the timestamp to be used after some quota ID exceeds the soft limit,
  * 48 bits should be enough, its high 16 bits can be used as quota flags.
@@ -1309,7 +1305,9 @@ static inline __u64 lustre_stoqb(__kernel_size_t space)
        (cmd == LUSTRE_Q_GETQUOTAPOOL ||        \
         cmd == LUSTRE_Q_SETQUOTAPOOL ||        \
         cmd == LUSTRE_Q_SETINFOPOOL ||         \
-        cmd == LUSTRE_Q_GETINFOPOOL)
+        cmd == LUSTRE_Q_GETINFOPOOL ||         \
+        cmd == LUSTRE_Q_SETDEFAULT_POOL ||     \
+        cmd == LUSTRE_Q_GETDEFAULT_POOL)
 
 #define ALLQUOTA 255       /* set all quota */
 static inline const char *qtype_name(int qtype)
index 1d1d43d..73ed8c9 100644 (file)
@@ -1,5 +1,5 @@
 lnxmaj="3.10.0"
-lnxrel="1160.21.1.el7"
+lnxrel="1160.25.1.el7"
 
 KERNEL_SRPM=kernel-${lnxmaj}-${lnxrel}.src.rpm
 SERIES=3.10-rhel7.9.series
index 8e85e3b..75c9990 100644 (file)
@@ -1,10 +1,10 @@
 lnxmaj="4.12"
 lnxmin=".14"
-lnxrel="122.63"
+lnxrel="122.66"
 # use this when there is an "RPM fix" which means that the name of the
 # (source) RPM has been updated but the version of the kernel inside the
 # RPM is not also updated
-rpmfix=".1"
+rpmfix=".2"
 
 # this is the delimiter that goes before the "smp" at the end of the version
 # defaults to empty
index 220f5a0..b86f851 100644 (file)
@@ -1,6 +1,6 @@
 lnxmaj="5.3"
 lnxmin=".18"
-lnxrel="24.52"
+lnxrel="24.61"
 # use this when there is an "RPM fix" which means that the name of the
 # (source) RPM has been updated but the version of the kernel inside the
 # RPM is not also updated
diff --git a/lustre/kernel_patches/targets/4.18-rhel8.4.target.in b/lustre/kernel_patches/targets/4.18-rhel8.4.target.in
new file mode 100644 (file)
index 0000000..d28a956
--- /dev/null
@@ -0,0 +1,26 @@
+lnxmaj="4.18.0"
+lnxrel="305.3.1.el8_4"
+
+KERNEL_SRPM=kernel-${lnxmaj}-${lnxrel}.src.rpm
+SERIES=""
+EXTRA_VERSION=${lnxrel}_lustre.@VERSION@
+LUSTRE_VERSION=@VERSION@
+
+DEVEL_PATH_ARCH_DELIMETER="."
+OFED_VERSION=inkernel
+
+BASE_ARCHS="i686 x86_64 ia64 ppc64"
+BIGMEM_ARCHS=""
+BOOT_ARCHS=""
+JENSEN_ARCHS=""
+#SMP_ARCHS="i686 x86_64 ia64 ppc64"
+# RHEL8 doesn't use smp specific kernels
+SMP_ARCHS=""
+UP_ARCHS=""
+
+for cc in gcc ; do
+    if which $cc >/dev/null 2>/dev/null ; then
+        export CC=$cc
+        break
+    fi
+done
index 29d90b3..0f5a2fb 100644 (file)
@@ -16,7 +16,7 @@ PATCH SERIES FOR SERVER KERNELS:
 3.10-rhel7.6.series     3.10.0-957.27.2.el7  (RHEL 7.6)
 3.10-rhel7.7.series     3.10.0-1062.18.1.el7 (RHEL 7.7)
 3.10-rhel7.8.series     3.10.0-1127.19.1.el7 (RHEL 7.8)
-3.10-rhel7.9.series     3.10.0-1160.21.1.el7 (RHEL 7.9)
+3.10-rhel7.9.series     3.10.0-1160.25.1.el7 (RHEL 7.9)
 4.18-rhel8.series       4.18.0-80.11.2.el8   (RHEL 8.0)
 4.18-rhel8.1.series     4.18.0-147.8.1.el8   (RHEL 8.1)
 4.18-rhel8.2.series     4.18.0-193.28.1.el8  (RHEL 8.2)
index efc80c1..4d37048 100644 (file)
@@ -982,6 +982,7 @@ static int rev_import_flags_update(struct obd_import *revimp,
 
        revimp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
 
+       revimp->imp_connect_data = *data;
        rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
        if (rc) {
                CERROR("%s: cannot get reverse import %s security: rc = %d\n",
@@ -1747,6 +1748,8 @@ static void target_finish_recovery(struct lu_target *lut)
                              atomic_read(&obd->obd_connected_clients),
                              obd->obd_stale_clients,
                              obd->obd_stale_clients == 1 ? "was" : "were");
+               if (obd->obd_stale_clients && do_dump_on_eviction(obd))
+                       libcfs_debug_dumplog();
        }
 
        ldlm_reprocess_recovery_done(obd->obd_namespace);
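
Copying *data into imp_connect_data gives the reverse import a record of what the client negotiated; presumably server-originated traffic can then honor flags such as OBD_CONNECT2_REP_MBITS. A hypothetical consumer, not part of this hunk:

    static bool demo_revimp_rep_mbits(struct obd_import *revimp)
    {
            struct obd_connect_data *ocd = &revimp->imp_connect_data;

            /* flags2 are meaningful only once OBD_CONNECT_FLAGS2 is set */
            return (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2) &&
                   (ocd->ocd_connect_flags2 & OBD_CONNECT2_REP_MBITS);
    }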
index 23bd76f..ebaa261 100644 (file)
@@ -258,7 +258,8 @@ static int expired_lock_main(void *arg)
                                           lock->l_blast_sent,
                                           obd_export_nid2str(export));
                                ldlm_lock_to_ns(lock)->ns_timeouts++;
-                               do_dump++;
+                               if (do_dump_on_eviction(export->exp_obd))
+                                       do_dump++;
                                class_fail_export(export);
                        }
                        class_export_lock_put(export, lock);
@@ -272,7 +273,7 @@ static int expired_lock_main(void *arg)
                }
                spin_unlock_bh(&waiting_locks_spinlock);
 
-               if (do_dump && obd_dump_on_eviction) {
+               if (do_dump) {
                        CERROR("dump the log upon eviction\n");
                        libcfs_debug_dumplog();
                }
index a5725d7..cd25883 100644 (file)
@@ -830,7 +830,7 @@ static void lfsck_linkea_del_buf(struct linkea_data *ldata,
 
                ldata->ld_lee = NULL;
        } else {
-               linkea_del_buf(ldata, lname);
+               linkea_del_buf(ldata, lname, false);
        }
 }
 
@@ -5883,7 +5883,7 @@ nodata:
                                GOTO(stop, rc);
                }
 
-               rc = linkea_add_buf(&ldata, cname, pfid);
+               rc = linkea_add_buf(&ldata, cname, pfid, false);
                if (rc == 0)
                        rc = lfsck_links_write(env, obj, &ldata, handle);
                if (rc != 0)
index c7186c1..7116920 100644 (file)
@@ -238,21 +238,22 @@ void ll_prune_aliases(struct inode *inode)
 }
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-                            struct lookup_intent *it,
-                            struct dentry *de)
+                           struct lookup_intent *it,
+                           struct dentry *de)
 {
-        int rc = 0;
+       int rc = 0;
+
         ENTRY;
 
-        if (!request)
-                RETURN(0);
+       if (!request)
+               RETURN(0);
 
-        if (it_disposition(it, DISP_LOOKUP_NEG))
-                RETURN(-ENOENT);
+       if (it_disposition(it, DISP_LOOKUP_NEG))
+               RETURN(-ENOENT);
 
-        rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+       rc = ll_prep_inode(&de->d_inode, &request->rq_pill, NULL, it);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
index 93c2fc0..95fd4b2 100644 (file)
@@ -444,23 +444,17 @@ static int ll_dir_setdirstripe(struct dentry *dparent, struct lmv_user_md *lump,
            !OBD_FAIL_CHECK(OBD_FAIL_LLITE_NO_CHECK_DEAD))
                RETURN(-ENOENT);
 
+       /* MDS < 2.14 doesn't support 'crush' hash type, and cannot handle
+        * unknown hash if client doesn't set a valid one, so switch to fnv_1a_64.
+        */
        if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_CRUSH)) {
-               if ((lump->lum_hash_type & LMV_HASH_TYPE_MASK) ==
-                    LMV_HASH_TYPE_CRUSH) {
-                       /* if server doesn't support 'crush' hash type,
-                        * switch to fnv_1a_64.
-                        */
-                       lump->lum_hash_type &= ~LMV_HASH_TYPE_MASK;
-                       lump->lum_hash_type |= LMV_HASH_TYPE_FNV_1A_64;
-               } else if ((lump->lum_hash_type & LMV_HASH_TYPE_MASK) ==
-                    LMV_HASH_TYPE_UNKNOWN) {
-                       /* from 2.14 MDT will choose default hash type if client
-                        * doesn't set a valid one, while old server doesn't
-                        * handle it.
-                        */
-                       lump->lum_hash_type &= ~LMV_HASH_TYPE_MASK;
-                       lump->lum_hash_type |= LMV_HASH_TYPE_DEFAULT;
-               }
+               enum lmv_hash_type type = lump->lum_hash_type &
+                                         LMV_HASH_TYPE_MASK;
+
+               if (type == LMV_HASH_TYPE_CRUSH ||
+                   type == LMV_HASH_TYPE_UNKNOWN)
+                       lump->lum_hash_type = (lump->lum_hash_type ^ type) |
+                                             LMV_HASH_TYPE_FNV_1A_64;
        }
 
        if (unlikely(!lmv_user_magic_supported(cpu_to_le32(lump->lum_magic))))
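
The XOR in the fallback works because type holds exactly the masked hash-type bits, so lum_hash_type ^ type clears the type while preserving the flag bits. An equivalent, perhaps more familiar spelling (illustrative only):

    /* same effect as the XOR form above: drop the type bits, keep the
     * flags, then select fnv_1a_64 for pre-2.14 MDSes */
    lump->lum_hash_type = (lump->lum_hash_type & ~LMV_HASH_TYPE_MASK) |
                          LMV_HASH_TYPE_FNV_1A_64;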
@@ -517,7 +511,7 @@ static int ll_dir_setdirstripe(struct dentry *dparent, struct lmv_user_md *lump,
 
        CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_SETDIRSTRIPE_PAUSE, cfs_fail_val);
 
-       err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
+       err = ll_prep_inode(&inode, &request->rq_pill, parent->i_sb, NULL);
        if (err)
                GOTO(out_inode, err);
 
@@ -1141,12 +1135,14 @@ int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
        case LUSTRE_Q_SETDEFAULT:
        case LUSTRE_Q_SETQUOTAPOOL:
        case LUSTRE_Q_SETINFOPOOL:
+       case LUSTRE_Q_SETDEFAULT_POOL:
                if (!capable(CAP_SYS_ADMIN))
                        RETURN(-EPERM);
                break;
        case Q_GETQUOTA:
        case LUSTRE_Q_GETDEFAULT:
        case LUSTRE_Q_GETQUOTAPOOL:
+       case LUSTRE_Q_GETDEFAULT_POOL:
                if (check_owner(type, id) &&
                    (!capable(CAP_SYS_ADMIN)))
                        RETURN(-EPERM);
diff --git