struct mdt_body *reqbody = NULL;
struct mdt_statfs_cache *msf;
ktime_t kstart = ktime_get();
+ int current_blockbits;
int rc;
ENTRY;
spin_unlock(&mdt->mdt_lock);
}
+ /* tgd_blockbits holds the recordsize bits set during mkfs.
+ * Once set, this value does not change. However, 'zfs set'
+ * can be used to change the MDT blocksize, so instead
+ * of using the cached value of 'tgd_blockbits', always
+ * calculate the blocksize bits, which may have
+ * changed.
+ */
+ current_blockbits = fls64(osfs->os_bsize) - 1;
+
/* at least try to account for cached pages. its still racy and
* might be under-reporting if clients haven't announced their
* caches with brw recently */
" pending %llu free %llu avail %llu\n",
tgd->tgd_tot_dirty, tgd->tgd_tot_granted,
tgd->tgd_tot_pending,
- osfs->os_bfree << tgd->tgd_blockbits,
- osfs->os_bavail << tgd->tgd_blockbits);
+ osfs->os_bfree << current_blockbits,
+ osfs->os_bavail << current_blockbits);
osfs->os_bavail -= min_t(u64, osfs->os_bavail,
((tgd->tgd_tot_dirty + tgd->tgd_tot_pending +
- osfs->os_bsize - 1) >> tgd->tgd_blockbits));
+ osfs->os_bsize - 1) >> current_blockbits));
tgt_grant_sanity_check(mdt->mdt_lu_dev.ld_obd, __func__);
CDEBUG(D_CACHE, "%llu blocks: %llu free, %llu avail; "
osfs->os_files, osfs->os_ffree, osfs->os_state);
if (!exp_grant_param_supp(tsi->tsi_exp) &&
- tgd->tgd_blockbits > COMPAT_BSIZE_SHIFT) {
+ current_blockbits > COMPAT_BSIZE_SHIFT) {
/* clients which don't support OBD_CONNECT_GRANT_PARAM
* should not see a block size > page size, otherwise
* cl_lost_grant goes mad. Therefore, we emulate a 4KB (=2^12)
* block size which is the biggest block size known to work
* with all client's page size. */
- osfs->os_blocks <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
- osfs->os_bfree <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
- osfs->os_bavail <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_blocks <<= current_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_bfree <<= current_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_bavail <<= current_blockbits - COMPAT_BSIZE_SHIFT;
osfs->os_bsize = 1 << COMPAT_BSIZE_SHIFT;
}
if (rc == 0)
if (buf->lb_len == 0)
RETURN(0);
+ LASSERT(!info->mti_big_acl_used);
again:
rc = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_ACCESS);
if (rc < 0) {
rc = 0;
} else if (rc == -EOPNOTSUPP) {
rc = 0;
- } else {
- if (rc == -ERANGE &&
- exp_connect_large_acl(info->mti_exp) &&
- buf->lb_buf != info->mti_big_acl) {
+ } else if (rc == -ERANGE) {
+ if (exp_connect_large_acl(info->mti_exp) &&
+ !info->mti_big_acl_used) {
if (info->mti_big_acl == NULL) {
info->mti_big_aclsize =
min_t(unsigned int,
buf->lb_buf = info->mti_big_acl;
buf->lb_len = info->mti_big_aclsize;
-
+ info->mti_big_acl_used = 1;
goto again;
}
-
+ /* FS has ACL bigger than our limits */
+ CDEBUG(D_INODE, "%s: "DFID" ACL can't fit into %d\n",
+ mdt_obd_name(mdt), PFID(mdt_object_fid(o)),
+ info->mti_big_aclsize);
+ rc = -E2BIG;
+ } else {
CERROR("%s: unable to read "DFID" ACL: rc = %d\n",
mdt_obd_name(mdt), PFID(mdt_object_fid(o)), rc);
}
} else {
- int client;
- int server;
- int acl_buflen;
- int lmm_buflen = 0;
- int lmmsize = 0;
-
- acl_buflen = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER);
- if (acl_buflen >= rc)
- goto map;
-
- /* If LOV/LMA EA is small, we can reuse part of their buffer */
- client = ptlrpc_req_get_repsize(pill->rc_req);
- server = lustre_packed_msg_size(pill->rc_req->rq_repmsg);
- if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) {
- lmm_buflen = req_capsule_get_size(pill, &RMF_MDT_MD,
- RCL_SERVER);
- lmmsize = repbody->mbo_eadatasize;
- }
-
- if (client < server - acl_buflen - lmm_buflen + rc + lmmsize) {
- CDEBUG(D_INODE, "%s: client prepared buffer size %d "
- "is not big enough with the ACL size %d (%d)\n",
- mdt_obd_name(mdt), client, rc,
- server - acl_buflen - lmm_buflen + rc + lmmsize);
- repbody->mbo_aclsize = 0;
- repbody->mbo_valid &= ~OBD_MD_FLACL;
- RETURN(-ERANGE);
- }
-
-map:
- if (buf->lb_buf == info->mti_big_acl)
- info->mti_big_acl_used = 1;
-
rc = nodemap_map_acl(nodemap, buf->lb_buf,
rc, NODEMAP_FS_TO_CLIENT);
/* if all ACLs mapped out, rc is still >= 0 */