struct page *vmpage;
struct niobuf_remote *rnb;
char *data;
- struct lu_env *env;
- struct cl_io *io;
- __u16 refcheck;
struct lustre_handle lockh;
struct ldlm_lock *lock;
unsigned long index, start;
struct niobuf_local lnb;
- int rc;
bool dom_lock = false;
ENTRY;
dom_lock = ldlm_has_dom(lock);
LDLM_LOCK_PUT(lock);
}
-
if (!dom_lock)
RETURN_EXIT;
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN_EXIT;
-
if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
RCL_SERVER))
- GOTO(out_env, rc = -ENODATA);
+ RETURN_EXIT;
rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
- data = (char *)rnb + sizeof(*rnb);
-
if (rnb == NULL || rnb->rnb_len == 0)
- GOTO(out_env, rc = 0);
+ RETURN_EXIT;
- CDEBUG(D_INFO, "Get data buffer along with open, len %i, i_size %llu\n",
- rnb->rnb_len, i_size_read(inode));
+ /* LU-11595: Server may return the whole file, which is always OK, or
+ * it may return just the file tail, whose offset must be aligned to the
+ * client PAGE_SIZE to be usable on this client; if the server's
+ * PAGE_SIZE is smaller, the offset may be unaligned and that data is
+ * just ignored.
+ */
+ if (rnb->rnb_offset % PAGE_SIZE)
+ RETURN_EXIT;
- io = vvp_env_thread_io(env);
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, obj);
- if (rc)
- GOTO(out_io, rc);
+ /* Server returns the whole file if it fits in the reply buffer, or
+ * just the file tail otherwise; in both cases the total size should
+ * equal the inode size.
+ */
+ if (rnb->rnb_offset + rnb->rnb_len < i_size_read(inode)) {
+ CERROR("%s: server returns off/len %llu/%u < i_size %llu\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), rnb->rnb_offset,
+ rnb->rnb_len, i_size_read(inode));
+ RETURN_EXIT;
+ }
+
+ CDEBUG(D_INFO, "Get data along with open at %llu len %i, i_size %llu\n",
+ rnb->rnb_offset, rnb->rnb_len, i_size_read(inode));
+
+ data = (char *)rnb + sizeof(*rnb);
lnb.lnb_file_offset = rnb->rnb_offset;
start = lnb.lnb_file_offset / PAGE_SIZE;
LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
lnb.lnb_page_offset = 0;
do {
- struct cl_page *clp;
-
lnb.lnb_data = data + (index << PAGE_SHIFT);
lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
if (lnb.lnb_len > PAGE_SIZE)
PTR_ERR(vmpage));
break;
}
- lock_page(vmpage);
- if (vmpage->mapping == NULL) {
- unlock_page(vmpage);
- put_page(vmpage);
- /* page was truncated */
- GOTO(out_io, rc = -ENODATA);
- }
- clp = cl_page_find(env, obj, vmpage->index, vmpage,
- CPT_CACHEABLE);
- if (IS_ERR(clp)) {
- unlock_page(vmpage);
- put_page(vmpage);
- GOTO(out_io, rc = PTR_ERR(clp));
- }
-
- /* export page */
- cl_page_export(env, clp, 1);
- cl_page_put(env, clp);
- unlock_page(vmpage);
put_page(vmpage);
index++;
} while (rnb->rnb_len > (index << PAGE_SHIFT));
- rc = 0;
EXIT;
-out_io:
- cl_io_fini(env, io);
-out_env:
- cl_env_put(env, &refcheck);
}
static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
if (file->f_flags & O_TRUNC)
oit.it_flags |= FMODE_WRITE;
- /* kernel only call f_op->open in dentry_open. filp_open calls
- * dentry_open after call to open_namei that checks permissions.
- * Only nfsd_open call dentry_open directly without checking
- * permissions and because of that this code below is safe. */
- if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
- oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
+ /* The kernel only calls f_op->open in dentry_open. filp_open calls
+ * dentry_open after the call to open_namei that checks permissions.
+ * Only nfsd_open calls dentry_open directly without checking
+ * permissions, and because of that the code below is safe.
+ */
+ if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
+ oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
* already? XXX - NFS implications? */
* After lease is taken, send the RPC MDS_REINT_RESYNC to the MDT
*/
static int ll_lease_file_resync(struct obd_client_handle *och,
- struct inode *inode)
+ struct inode *inode, unsigned long arg)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
+ struct ll_ioc_lease_id ioc;
__u64 data_version_unused;
int rc;
ENTRY;
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
+ if (copy_from_user(&ioc, (struct ll_ioc_lease_id __user *)arg,
+ sizeof(ioc)))
+ RETURN(-EFAULT);
+
/* before starting file resync, it's necessary to clean up page cache
* in client memory, otherwise once the layout version is increased,
* writing back cached data will be denied the OSTs. */
GOTO(out, rc);
op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_mirror_id = ioc.lil_mirror_id;
rc = md_file_resync(sbi->ll_md_exp, op_data);
if (rc)
GOTO(out, rc);
* if it's at least 'mdd.*.atime_diff' older.
* All in all, the atime in Lustre does not strictly comply with
* POSIX. Solving this problem needs to send an RPC to MDT for each
- * read, this will hurt performance. */
- if (LTIME_S(inode->i_atime) < lli->lli_atime || lli->lli_update_atime) {
- LTIME_S(inode->i_atime) = lli->lli_atime;
+ * read, this will hurt performance.
+ */
+ if (inode->i_atime.tv_sec < lli->lli_atime ||
+ lli->lli_update_atime) {
+ inode->i_atime.tv_sec = lli->lli_atime;
lli->lli_update_atime = 0;
}
- LTIME_S(inode->i_mtime) = lli->lli_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_ctime;
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ inode->i_ctime.tv_sec = lli->lli_ctime;
- atime = LTIME_S(inode->i_atime);
- mtime = LTIME_S(inode->i_mtime);
- ctime = LTIME_S(inode->i_ctime);
+ mtime = inode->i_mtime.tv_sec;
+ atime = inode->i_atime.tv_sec;
+ ctime = inode->i_ctime.tv_sec;
cl_object_attr_lock(obj);
if (OBD_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
i_size_write(inode, attr->cat_size);
inode->i_blocks = attr->cat_blocks;
- LTIME_S(inode->i_atime) = atime;
- LTIME_S(inode->i_mtime) = mtime;
- LTIME_S(inode->i_ctime) = ctime;
+ inode->i_mtime.tv_sec = mtime;
+ inode->i_atime.tv_sec = atime;
+ inode->i_ctime.tv_sec = ctime;
out_size_unlock:
ll_inode_size_unlock(inode);
io->ci_ndelay = 0;
io->ci_designated_mirror = fd->fd_designated_mirror;
io->ci_layout_version = fd->fd_layout_version;
- io->ci_pio = 0; /* doesn't have a mechanism to pass mirror
- * io to ptasks */
}
CDEBUG(D_VFSTRACE, "%s: desiginated mirror: %d\n",
return false;
}
-static int ll_file_io_ptask(struct cfs_ptask *ptask);
-
static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
{
struct inode *inode = file_inode(file);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- memset(&io->u.ci_rw.rw_iter, 0, sizeof(io->u.ci_rw.rw_iter));
- init_sync_kiocb(&io->u.ci_rw.rw_iocb, file);
- io->u.ci_rw.rw_file = file;
- io->u.ci_rw.rw_ptask = ll_file_io_ptask;
- io->u.ci_rw.rw_nonblock = !!(file->f_flags & O_NONBLOCK);
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
io->ci_lock_no_expand = fd->ll_lock_no_expand;
if (iot == CIT_WRITE) {
- io->u.ci_rw.rw_append = !!(file->f_flags & O_APPEND);
- io->u.ci_rw.rw_sync = !!(file->f_flags & O_SYNC ||
+ io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
+ io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
file->f_flags & O_DIRECT ||
IS_SYNC(inode));
}
io->ci_lockreq = CILR_MANDATORY;
}
io->ci_noatime = file_is_noatime(file);
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_PIO)
- io->ci_pio = !io->u.ci_rw.rw_append;
- else
- io->ci_pio = 0;
/* FLR: only use non-delay I/O for read as there is only one
* avaliable mirror for write. */
ll_io_set_mirror(io, file);
}
-static int ll_file_io_ptask(struct cfs_ptask *ptask)
-{
- struct cl_io_pt *pt = ptask->pt_cbdata;
- struct file *file = pt->cip_file;
- struct lu_env *env;
- struct cl_io *io;
- loff_t pos = pt->cip_pos;
- int rc;
- __u16 refcheck;
- ENTRY;
-
- CDEBUG(D_VFSTRACE, "%s: %s range: [%llu, %llu)\n",
- file_dentry(file)->d_name.name,
- pt->cip_iot == CIT_READ ? "read" : "write",
- pos, pos + pt->cip_count);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- io = vvp_env_thread_io(env);
- ll_io_init(io, file, pt->cip_iot);
- io->u.ci_rw.rw_iter = pt->cip_iter;
- io->u.ci_rw.rw_iocb = pt->cip_iocb;
- io->ci_pio = 0; /* It's already in parallel task */
-
- rc = cl_io_rw_init(env, io, pt->cip_iot, pos,
- pt->cip_count - pt->cip_result);
- if (!rc) {
- struct vvp_io *vio = vvp_env_io(env);
-
- vio->vui_io_subtype = IO_NORMAL;
- vio->vui_fd = LUSTRE_FPRIVATE(file);
-
- ll_cl_add(file, env, io, LCC_RW);
- rc = cl_io_loop(env, io);
- ll_cl_remove(file, env);
- } else {
- /* cl_io_rw_init() handled IO */
- rc = io->ci_result;
- }
-
- if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_PTASK_IO_FAIL, 0)) {
- if (io->ci_nob > 0)
- io->ci_nob /= 2;
- rc = -EIO;
- }
-
- if (io->ci_nob > 0) {
- pt->cip_result += io->ci_nob;
- iov_iter_advance(&pt->cip_iter, io->ci_nob);
- pos += io->ci_nob;
- pt->cip_iocb.ki_pos = pos;
-#ifdef HAVE_KIOCB_KI_LEFT
- pt->cip_iocb.ki_left = pt->cip_count - pt->cip_result;
-#elif defined(HAVE_KI_NBYTES)
- pt->cip_iocb.ki_nbytes = pt->cip_count - pt->cip_result;
-#endif
- }
-
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
-
- pt->cip_need_restart = io->ci_need_restart;
-
- CDEBUG(D_VFSTRACE, "%s: %s ret: %zd, rc: %d\n",
- file_dentry(file)->d_name.name,
- pt->cip_iot == CIT_READ ? "read" : "write",
- pt->cip_result, rc);
-
- RETURN(pt->cip_result > 0 ? 0 : rc);
-}
-
static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct file *file, enum cl_io_type iot,
loff_t *ppos, size_t count)
{
- struct range_lock range;
struct vvp_io *vio = vvp_env_io(env);
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct range_lock range;
struct cl_io *io;
- loff_t pos = *ppos;
ssize_t result = 0;
int rc = 0;
unsigned retried = 0;
ENTRY;
- CDEBUG(D_VFSTRACE, "%s: %s range: [%llu, %llu)\n",
+ CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
file_dentry(file)->d_name.name,
- iot == CIT_READ ? "read" : "write", pos, pos + count);
+ iot == CIT_READ ? "read" : "write", *ppos, count);
restart:
io = vvp_env_thread_io(env);
ll_io_init(io, file, iot);
- if (args->via_io_subtype == IO_NORMAL) {
- io->u.ci_rw.rw_iter = *args->u.normal.via_iter;
- io->u.ci_rw.rw_iocb = *args->u.normal.via_iocb;
- }
- if (args->via_io_subtype != IO_NORMAL || restarted)
- io->ci_pio = 0;
io->ci_ndelay_tried = retried;
- if (cl_io_rw_init(env, io, iot, pos, count) == 0) {
+ if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
bool range_locked = false;
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
- range_lock_init(&range, pos, pos + count - 1);
+ range_lock_init(&range, *ppos, *ppos + count - 1);
vio->vui_fd = LUSTRE_FPRIVATE(file);
vio->vui_io_subtype = args->via_io_subtype;
switch (vio->vui_io_subtype) {
case IO_NORMAL:
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
/* Direct IO reads must also take range lock,
* or multiple reads will try to work on the same pages
* See LU-6227 for details. */
}
ll_cl_add(file, env, io, LCC_RW);
- if (io->ci_pio && iot == CIT_WRITE && !IS_NOSEC(inode) &&
- !lli->lli_inode_locked) {
- inode_lock(inode);
- lli->lli_inode_locked = 1;
- }
rc = cl_io_loop(env, io);
- if (lli->lli_inode_locked) {
- lli->lli_inode_locked = 0;
- inode_unlock(inode);
- }
ll_cl_remove(file, env);
if (range_locked) {
if (io->ci_nob > 0) {
result += io->ci_nob;
count -= io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
- if (args->via_io_subtype == IO_NORMAL) {
- iov_iter_advance(args->u.normal.via_iter, io->ci_nob);
-
- /* CLIO is too complicated. See LU-11069. */
- if (cl_io_is_append(io))
- pos = io->u.ci_rw.rw_iocb.ki_pos;
- else
- pos += io->ci_nob;
-
- args->u.normal.via_iocb->ki_pos = pos;
-#ifdef HAVE_KIOCB_KI_LEFT
- args->u.normal.via_iocb->ki_left = count;
-#elif defined(HAVE_KI_NBYTES)
- args->u.normal.via_iocb->ki_nbytes = count;
-#endif
- } else {
- /* for splice */
- pos = io->u.ci_rw.rw_range.cir_pos;
- }
+ /* prepare IO restart */
+ if (count > 0 && args->via_io_subtype == IO_NORMAL)
+ args->u.normal.via_iter = vio->vui_iter;
}
out:
cl_io_fini(env, io);
if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
- "%s: restart %s range: [%llu, %llu) ret: %zd, rc: %d\n",
- file_dentry(file)->d_name.name,
- iot == CIT_READ ? "read" : "write",
- pos, pos + count, result, rc);
+ "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
+ iot == CIT_READ ? "read" : "write",
+ *ppos, count, result, rc);
/* preserve the tried count for FLR */
retried = io->ci_ndelay_tried;
restarted = true;
}
}
- CDEBUG(D_VFSTRACE, "%s: %s *ppos: %llu, pos: %llu, ret: %zd, rc: %d\n",
- file_dentry(file)->d_name.name,
- iot == CIT_READ ? "read" : "write", *ppos, pos, result, rc);
-
- *ppos = pos;
+ CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
RETURN(result > 0 ? result : rc);
}
int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
{
- struct md_op_data *op_data;
- int rc;
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct md_op_data *op_data;
+ int rc;
ENTRY;
/* Detect out-of range masks */
!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- /* Detect out-of range archive id */
- if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
- (hss->hss_archive_id > LL_HSM_MAX_ARCHIVE))
- RETURN(-EINVAL);
+ if (!exp_connect_archive_id_array(exp)) {
+ /* Detect out-of range archive id */
+ if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
+ (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
+ RETURN(-EINVAL);
+ }
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, hss);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
- sizeof(*op_data), op_data, NULL);
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
+ op_data, NULL);
ll_finish_md_op_data(op_data);
RETURN(0);
}
+int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
+ return -EINVAL;
+
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
unsigned long arg)
{
struct iattr *attr;
int flags;
- /* only root could change project ID */
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ rc = ll_ioctl_check_project(inode, &fsxattr);
+ if (rc)
+ RETURN(rc);
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- if (copy_from_user(&fsxattr,
- (const struct fsxattr __user *)arg,
- sizeof(fsxattr)))
- GOTO(out_fsxattr, rc = -EFAULT);
-
flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
RETURN(PTR_ERR(och));
if (ioc->lil_flags & LL_LEASE_RESYNC) {
- rc = ll_lease_file_resync(och, inode);
+ rc = ll_lease_file_resync(och, inode, arg);
if (rc) {
ll_lease_close(och, inode, NULL);
RETURN(rc);
int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
- bool lock_inode;
#elif defined(HAVE_FILE_FSYNC_2ARGS)
int ll_fsync(struct file *file, int datasync)
{
#ifdef HAVE_FILE_FSYNC_4ARGS
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- lock_inode = !lli->lli_inode_locked;
- if (lock_inode)
- inode_lock(inode);
+ inode_lock(inode);
#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
}
#ifdef HAVE_FILE_FSYNC_4ARGS
- if (lock_inode)
- inode_unlock(inode);
+ inode_unlock(inode);
#endif
RETURN(rc);
}
static int ll_merge_md_attr(struct inode *inode)
{
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_attr attr = { 0 };
int rc;
- LASSERT(ll_i2info(inode)->lli_lsm_md != NULL);
+ LASSERT(lli->lli_lsm_md != NULL);
+ down_read(&lli->lli_lsm_sem);
rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
&attr, ll_md_blocking_ast);
+ up_read(&lli->lli_lsm_sem);
if (rc != 0)
RETURN(rc);
RETURN(rc);
}
- LTIME_S(inode->i_atime) = lli->lli_atime;
- LTIME_S(inode->i_mtime) = lli->lli_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_ctime;
+ inode->i_atime.tv_sec = lli->lli_atime;
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ inode->i_ctime.tv_sec = lli->lli_ctime;
}
OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req;
- struct mdt_body *body;
void *lvbdata;
void *lmm;
int lmmsize;
* layout here. Please note that we can't use the LVB buffer in
* completion AST because it doesn't have a large enough buffer */
rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc == 0)
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- OBD_MD_FLXATTR, XATTR_NAME_LOV, lmmsize, &req);
if (rc < 0)
RETURN(rc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
+ XATTR_NAME_LOV, lmmsize, &req);
+ if (rc < 0) {
+ if (rc == -ENODATA)
+ GOTO(out, rc = 0); /* empty layout */
+ else
+ RETURN(rc);
+ }
- lmmsize = body->mbo_eadatasize;
+ lmmsize = rc;
+ rc = 0;
if (lmmsize == 0) /* empty layout */
GOTO(out, rc = 0);