add_idmaps() {
local i
local cmd="$LCTL nodemap_add_idmap"
+ local do_proj=true
local rc=0
+ (( $MDS1_VERSION >= $(version_code 2.14.52) )) || do_proj=false
+
echo "Start to add idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
--idmap $client_id:$fs_id; then
rc=$((rc + 1))
fi
+ if $do_proj; then
+ if ! do_facet mgs $cmd --name $csum \
+ --idtype projid --idmap \
+ $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ fi
done
done
delete_idmaps() {
local i
local cmd="$LCTL nodemap_del_idmap"
+ local do_proj=true
local rc=0
+ (( $MDS1_VERSION >= $(version_code 2.14.52) )) || do_proj=false
+
echo "Start to delete idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
--idmap $client_id:$fs_id; then
rc=$((rc + 1))
fi
+ if $do_proj; then
+ if ! do_facet mgs $cmd --name $csum \
+ --idtype projid --idmap \
+ $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ fi
done
done
cmd[0]="$LCTL nodemap_modify --property squash_uid"
cmd[1]="$LCTL nodemap_modify --property squash_gid"
+ cmd[2]="$LCTL nodemap_modify --property squash_projid"
if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
return 1
wait_nm_sync default squash_uid '' inactive
squash_id default 99 1
wait_nm_sync default squash_gid '' inactive
+if [ "$MDS1_VERSION" -ge $(version_code 2.14.50) ]; then
+ squash_id default 99 2
+ wait_nm_sync default squash_projid '' inactive
+fi
test_nid() {
local cmd
[[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
rc=0
+ if (( $MDS1_VERSION >= $(version_code 2.14.52) )); then
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 2; then
+ rc=$((rc + 1))
+ fi
+ done
+ fi
+ [[ $rc != 0 ]] && error "nodemap squash_projid with $rc" && return 5
+
+ rc=0
delete_nodemaps
rc=$?
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
rc=$?
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+ for (( i = 0; i < NODEMAP_COUNT; i++ )); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_modify --name $csum \
+ --property admin --value 0; then
+ rc=$((rc + 1))
+ fi
+ if ! do_facet mgs $LCTL nodemap_modify --name $csum \
+ --property trusted --value 0; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 1
+
rc=0
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
nodemap_exercise_fileset() {
local nm="$1"
local loop=0
+ local check_proj=true
+
+ (( $MDS1_VERSION >= $(version_code 2.14.52) )) || check_proj=false
# setup
if [ "$nm" == "default" ]; then
do_facet mgs $LCTL nodemap_activate 1
wait_nm_sync active
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 1
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
+ check_proj=false
else
nodemap_test_setup
fi
error "unable to add fileset info to $nm nodemap for servers"
wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
+ if $check_proj; then
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property admin --value 1
+ wait_nm_sync $nm admin_nodemap
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property trusted --value 0
+ wait_nm_sync $nm trusted_nodemap
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property map_mode --value projid
+ wait_nm_sync $nm map_mode
+ do_facet mgs $LCTL nodemap_add_idmap --name $nm \
+ --idtype projid --idmap 1:1
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property deny_unknown --value 1
+ wait_nm_sync $nm deny_unknown
+ fi
+
# re-mount client
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
error "fileset not taken into account"
+ if $check_proj; then
+ do_node ${clients_arr[0]} $LFS setquota -p 1 -b 10000 -B 11000 \
+ -i 0 -I 0 $MOUNT || error "setquota -p 1 failed"
+ do_node ${clients_arr[0]} $LFS setquota -p 2 -b 10000 -B 11000 \
+ -i 0 -I 0 $MOUNT && error "setquota -p 2 should fail"
+ fi
+
# re-mount client with sub-subdir
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
fi
fileset_test_cleanup "$nm"
if [ "$nm" == "default" ]; then
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 0
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
do_facet mgs $LCTL nodemap_activate 0
wait_nm_sync active 0
trap 0
}
cleanup_for_enc_tests() {
+ rm -rf $DIR/$tdir
+
# remount client normally
if is_mounted $MOUNT; then
umount_client $MOUNT || error "umount $MOUNT failed"
# trace_cmd: run a command with Lustre debug tracing enabled and fail the
# test if a "get xattr" event shows up in the debug log.  Used to verify
# that the security.c (encryption) xattr is NOT fetched by the command.
# If MATCHING_STRING is set in the environment, it overrides the default
# grep pattern; otherwise we look for the security.c xattr fetch messages.
trace_cmd() {
local cmd="$@"
- local xattr_name="security.c"
# flush client locks so the command below really hits the servers
cancel_lru_locks
$LCTL set_param debug=+info
eval $cmd
[ $? -eq 0 ] || error "$cmd failed"
- $LCTL dk | grep -E "get xattr '${xattr_name}'|get xattrs"
+ if [ -z "$MATCHING_STRING" ]; then
+ $LCTL dk | grep -E "get xattr 'security.c'|get xattrs"
+ else
+ $LCTL dk | grep -E "$MATCHING_STRING"
+ fi
# grep succeeding (rc 0) means the xattr fetch happened -> test failure
[ $? -ne 0 ] || error "get xattr event was triggered"
}
trace_cmd stat $dirname/f1
trace_cmd cat $dirname/f1
dd if=/dev/zero of=$dirname/f1 bs=1M count=10 conv=fsync
- trace_cmd $TRUNCATE $dirname/f1 10240
+ MATCHING_STRING="get xattr 'security.c'" \
+ trace_cmd $TRUNCATE $dirname/f1 10240
trace_cmd $LFS setstripe -E -1 -S 4M $dirname/f2
trace_cmd $LFS migrate -E -1 -S 256K $dirname/f2
trace_cmd stat $dirname/f1
trace_cmd cat $dirname/f1
dd if=/dev/zero of=$dirname/f1 bs=1M count=10 conv=fsync
- trace_cmd $TRUNCATE $dirname/f1 10240
+ MATCHING_STRING="get xattr 'security.c'" \
+ trace_cmd $TRUNCATE $dirname/f1 10240
trace_cmd $LFS setstripe -E -1 -S 4M $dirname/f2
trace_cmd $LFS migrate -E -1 -S 256K $dirname/f2
else
}
run_test 57 "security.c xattr protection"
+# test_58: verify that xattrs of encrypted files stay accessible.
+# With dummy encryption enabled, create files/dirs, drop all caches so
+# metadata must be re-fetched from the servers, then read the linkea
+# xattr and recursively list a large tree, failing on any access error.
+test_58() {
+ local testfile=$DIR/$tdir/$tfile
+ local i
+
+ [[ $(facet_fstype ost1) == zfs ]] && skip "skip ZFS backend"
+
+ $LCTL get_param mdc.*.import | grep -q client_encryption ||
+ skip "client encryption not supported"
+
+ mount.lustre --help |& grep -q "test_dummy_encryption:" ||
+ skip "need dummy encryption support"
+
+ stack_trap cleanup_for_enc_tests EXIT
+ setup_for_enc_tests
+
+ touch $testfile
+ mkdir $DIR/$tdir/subdir
+
+ # force linkea to be read back from the MDT, not from client cache
+ cancel_lru_locks
+ sync ; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ ll_decode_linkea $testfile || error "cannot read $tfile linkea"
+ ll_decode_linkea $DIR/$tdir/subdir || error "cannot read subdir linkea"
+
+ # populate a large tree so the ls below exercises many getattrs
+ for ((i = 0; i < 1000; i++)); do
+ mkdir -p $DIR/$tdir/d${i}
+ touch $DIR/$tdir/f${i}
+ createmany -m $DIR/$tdir/d${i}/f 5 > /dev/null
+ done
+
+ cancel_lru_locks
+ sync ; sync
+ echo 3 > /proc/sys/vm/drop_caches
+
+ sleep 10
+ ls -ailR $DIR/$tdir > /dev/null || error "fail to ls"
+}
+run_test 58 "access to enc file's xattrs"
+
log "cleanup: ======================================================"
sec_unsetup() {