/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2007 Cluster File Systems, Inc.
 * Author: Nikita Danilov <nikita@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <sys/types.h>
#include <sys/queue.h>
#ifndef __CYGWIN__
# include <sys/statvfs.h>
#else
# include <sys/statfs.h>
#endif

#include <liblustre.h>

#include <obd_support.h>
#include <lustre_fid.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_ver.h>
#include <lustre_mdc.h>
#include <cl_object.h>

#include "llite_lib.h"

/*
 * slp_ prefix stands for "Sysio Library Posix". It corresponds to the
 * historical "llu_" prefix.
 */

static int  slp_type_init(struct lu_device_type *t);
static void slp_type_fini(struct lu_device_type *t);

static struct cl_page *slp_page_init(const struct lu_env *env,
                                     struct cl_object *obj,
                                     struct cl_page *page, cfs_page_t *vmpage);
static int slp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr);

static struct lu_device *slp_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg);

static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
                       struct cl_io *io);
static struct slp_io *cl2slp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice);

static void llu_free_user_page(struct page *page);

static const struct lu_object_operations      slp_lu_obj_ops;
static const struct lu_device_operations      slp_lu_ops;
static const struct cl_device_operations      slp_cl_ops;
static const struct cl_io_operations          ccc_io_ops;
static const struct lu_device_type_operations slp_device_type_ops;
static const struct cl_page_operations        slp_page_ops;
static const struct cl_page_operations        slp_transient_page_ops;
static const struct cl_lock_operations        slp_lock_ops;

/*****************************************************************************
 *
 * Slp device and device type functions.
 *
 */

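/*
 * lu_context_key machinery: each lu_context tagged LCT_SESSION gets a
 * private struct slp_session, allocated by the ->lct_init() callback below
 * and released by ->lct_fini().
 */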
void *slp_session_key_init(const struct lu_context *ctx,
                           struct lu_context_key *key)
{
        struct slp_session *session;

        OBD_ALLOC_PTR(session);
        if (session == NULL)
                session = ERR_PTR(-ENOMEM);
        return session;
}

void slp_session_key_fini(const struct lu_context *ctx,
                          struct lu_context_key *key, void *data)
{
        struct slp_session *session = data;
        OBD_FREE_PTR(session);
}

struct lu_context_key slp_session_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = slp_session_key_init,
        .lct_fini = slp_session_key_fini
};

/* type constructor/destructor: slp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(slp, &ccc_key, &ccc_session_key, &slp_session_key);

static struct lu_device *slp_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg)
{
        return ccc_device_alloc(env, t, cfg, &slp_lu_ops, &slp_cl_ops);
}

static int slp_lock_init(const struct lu_env *env,
                         struct cl_object *obj, struct cl_lock *lock,
                         const struct cl_io *io)
{
        return ccc_lock_init(env, obj, lock, io, &slp_lock_ops);
}

static const struct cl_object_operations slp_ops = {
        .coo_page_init = slp_page_init,
        .coo_lock_init = slp_lock_init,
        .coo_io_init   = slp_io_init,
        .coo_attr_get  = slp_attr_get,
        .coo_attr_set  = ccc_attr_set,
        .coo_conf_set  = ccc_conf_set,
        .coo_glimpse   = ccc_object_glimpse
};

static int slp_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        struct ccc_object *obj   = lu2ccc(o);
        struct inode      *inode = obj->cob_inode;
        struct intnl_stat *st    = NULL;

        if (inode)
                st = llu_i2stat(inode);

        return (*p)(env, cookie, LUSTRE_SLP_NAME"-object@%p(%p:%lu/%u)",
                    obj, inode,
                    st ? (unsigned long)st->st_ino : 0UL,
                    inode ? (unsigned int)llu_i2info(inode)->lli_st_generation
                          : 0);
}

static const struct lu_object_operations slp_lu_obj_ops = {
        .loo_object_init      = ccc_object_init,
        .loo_object_start     = NULL,
        .loo_object_delete    = NULL,
        .loo_object_release   = NULL,
        .loo_object_free      = ccc_object_free,
        .loo_object_print     = slp_object_print,
        .loo_object_invariant = NULL
};

static struct lu_object *slp_object_alloc(const struct lu_env *env,
                                          const struct lu_object_header *hdr,
                                          struct lu_device *dev)
{
        return ccc_object_alloc(env, hdr, dev, &slp_ops, &slp_lu_obj_ops);
}

static const struct lu_device_operations slp_lu_ops = {
        .ldo_object_alloc = slp_object_alloc
};

static const struct cl_device_operations slp_cl_ops = {
        .cdo_req_init = ccc_req_init
};

static const struct lu_device_type_operations slp_device_type_ops = {
        .ldto_init = slp_type_init,
        .ldto_fini = slp_type_fini,

        .ldto_start = slp_type_start,
        .ldto_stop  = slp_type_stop,

        .ldto_device_alloc = slp_device_alloc,
        .ldto_device_free  = ccc_device_free,
        .ldto_device_init  = ccc_device_init,
        .ldto_device_fini  = ccc_device_fini
};

struct lu_device_type slp_device_type = {
        .ldt_tags     = LU_DEVICE_CL,
        .ldt_name     = LUSTRE_SLP_NAME,
        .ldt_ops      = &slp_device_type_ops,
        .ldt_ctx_tags = LCT_CL_THREAD
};

int slp_global_init(void)
{
        int result;

        result = ccc_global_init(&slp_device_type);
        return result;
}

void slp_global_fini(void)
{
        ccc_global_fini(&slp_device_type);
}

/*****************************************************************************
 *
 * Object operations.
 *
 */

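/*
 * liblustre has no page cache: every page reaching this layer wraps a
 * user-space buffer, so only CPT_TRANSIENT pages are expected here.
 */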
static struct cl_page *slp_page_init(const struct lu_env *env,
                                     struct cl_object *obj,
                                     struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_ALLOC_PTR(cpg);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;

                if (page->cp_type == CPT_CACHEABLE) {
                        /* no page cache in liblustre */
                        LBUG();
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &slp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

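/*
 * Set up the io slice for every io type. A positive return value tells
 * cl_io_init() that the io needs no further processing, which is how the
 * zero-byte read/write case is short-circuited.
 */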
static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
                       struct cl_io *io)
{
        struct ccc_io *vio = ccc_env_io(env);
        int result = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        cl_io_slice_add(io, &vio->cui_cl, obj, &ccc_io_ops);
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                size_t count;

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 * results." -- Single Unix Spec */
                if (count == 0)
                        result = 1;
                else {
                        vio->cui_tot_count  = count;
                        vio->cui_tot_nrsegs = 0;
                }
        }
        return result;
}

static int slp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct inode *inode = ccc_object_inode(obj);
        struct intnl_stat *st = llu_i2stat(inode);

        attr->cat_size   = st->st_size;
        attr->cat_blocks = st->st_blocks;
        attr->cat_mtime  = st->st_mtime;
        attr->cat_atime  = st->st_atime;
        attr->cat_ctime  = st->st_ctime;
        /* KMS is not known by this layer */
        return 0; /* layers below have to fill in the rest */
}

/*****************************************************************************
 *
 * Page operations.
 *
 */

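/*
 * liblustre submits all transfers synchronously, so each page completion
 * is expected to carry a cl_sync_io anchor (asserted in
 * slp_page_completion_common() below).
 */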
static void slp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        llu_free_user_page(vmpage);
        OBD_FREE_PTR(cp);
}

static void slp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        LASSERT(cp->cpg_cl.cpl_page->cp_sync_io != NULL);
}

static void slp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        slp_page_completion_common(env, cp, ioret);
}

static void slp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        if (ioret == 0) {
                cp->cpg_write_queued = 0;
                /*
                 * Only if ioret == 0, i.e. the write succeeded, may this
                 * page be deleted from the pending-write count.
                 */
        }
        slp_page_completion_common(env, cp, ioret);
}

static int slp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return -EBUSY;
}

static void slp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        slp_page_fini_common(cp);
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations slp_transient_page_ops = {
        .cpo_own           = ccc_transient_page_own,
        .cpo_assume        = ccc_transient_page_assume,
        .cpo_unassume      = ccc_transient_page_unassume,
        .cpo_disown        = ccc_transient_page_disown,
        .cpo_discard       = ccc_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_is_vmlocked   = slp_page_is_vmlocked,
        .cpo_fini          = slp_transient_page_fini,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_completion = slp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_completion = slp_page_completion_write_common,
                }
        }
};

/*****************************************************************************
 *
 * Lock operations.
 *
 */

static int slp_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *unused, __u32 enqflags)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));

        liblustre_wait_event(0);
        return 0;
}

static const struct cl_lock_operations slp_lock_ops = {
        .clo_delete    = ccc_lock_delete,
        .clo_fini      = ccc_lock_fini,
        .clo_enqueue   = slp_lock_enqueue,
        .clo_wait      = ccc_lock_wait,
        .clo_unuse     = ccc_lock_unuse,
        .clo_fits_into = ccc_lock_fits_into,
};

/*****************************************************************************
 *
 * io operations.
 *
 */

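/*
 * Compute the byte range a read/write io has to lock. An O_APPEND write
 * does not know its start offset until the size is sampled under the
 * lock, so it locks the whole file, [0, OBD_OBJECT_EOF].
 */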
static int slp_io_rw_lock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io *io = ios->cis_io;
        loff_t start;
        loff_t end;

        if (cl_io_is_append(io)) {
                start = 0;
                end   = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end   = start + io->u.ci_wr.wr.crw_count - 1;
        }

        ccc_io_update_iov(env, cio, io);

        /*
         * This acquires a real DLM lock only in the O_APPEND case, because
         * of the io->ci_lockreq setting in llu_io_init().
         */
        LASSERT(ergo(cl_io_is_append(io), io->ci_lockreq == CILR_MANDATORY));
        LASSERT(ergo(!cl_io_is_append(io), io->ci_lockreq == CILR_NEVER));
        return ccc_io_one_lock(env, io, 0,
                               io->ci_type == CIT_READ ? CLM_READ : CLM_WRITE,
                               start, end);
}

static int slp_io_trunc_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        return 0;
}

static int slp_io_trunc_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        return 0;
}

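/*
 * liblustre runs in user space and has no kernel struct page; the "pages"
 * allocated below are small descriptors recording an (index, address,
 * offset, count) window over the caller's buffer.
 */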
static struct page *llu_get_user_page(int index, void *addr, int offset,
                                      int count)
{
        struct page *page;

        OBD_ALLOC_PTR(page);
        if (!page)
                return NULL;

        page->index   = index;
        page->addr    = addr;
        page->_offset = offset;
        page->_count  = count;

        CFS_INIT_LIST_HEAD(&page->list);
        CFS_INIT_LIST_HEAD(&page->_node);

        return page;
}

static void llu_free_user_page(struct page *page)
{
        OBD_FREE_PTR(page);
}

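/*
 * Carve a contiguous user buffer into page-sized transient cl_pages and
 * submit them as one synchronous 2-queue. For example, with 4K pages a
 * 10000-byte write at pos 100 becomes chunks of 3996, 4096 and 1908
 * bytes; the partial head and tail pages are trimmed with cl_page_clip()
 * so only the valid part of each page is transferred.
 */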
static int llu_queue_pio(const struct lu_env *env, struct cl_io *io,
                         struct llu_io_group *group,
                         char *buf, size_t count, loff_t pos)
{
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct intnl_stat *st = llu_i2stat(inode);
        struct obd_export *exp = llu_i2obdexp(inode);
        struct page *page;
        struct cl_page *clp;
        struct cl_2queue *queue;
        int rc = 0, ret_bytes = 0;
        int local_lock;
        ENTRY;

        if (exp == NULL)
                RETURN(-EINVAL);

        local_lock = group->lig_params->lrp_lock_mode != LCK_NL;
        queue = &io->ci_queue;
        cl_2queue_init(queue);

        /* prepare the pages array */
        do {
                unsigned long index, offset, bytes;

                offset = (pos & ~CFS_PAGE_MASK);
                index = pos >> CFS_PAGE_SHIFT;
                bytes = CFS_PAGE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /* prevent read beyond file range */
                if (/* local_lock && */
                    io->ci_type == CIT_READ && pos + bytes >= st->st_size) {
                        if (pos >= st->st_size)
                                break;
                        bytes = st->st_size - pos;
                }

                /* prepare page for this index */
                page = llu_get_user_page(index, buf - offset, offset, bytes);
                if (!page) {
                        rc = -ENOMEM;
                        break;
                }

                clp = cl_page_find(env, obj,
                                   cl_index(obj, pos),
                                   page, CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                cl_2queue_add(queue, clp);
                /* drop the reference count for cl_page_find, so that the page
                 * will be freed in cl_2queue_fini. */
                cl_page_put(env, clp);
                cl_page_clip(env, clp, offset, offset + bytes);

                buf += bytes;
                pos += bytes;
                count -= bytes;
                group->lig_rwcount += bytes;
                ret_bytes += bytes;
        } while (count);

        if (rc == 0) {
                enum cl_req_type iot;

                iot = io->ci_type == CIT_READ ? CRT_READ : CRT_WRITE;
                rc = cl_io_submit_sync(env, io, iot, queue, CRP_NORMAL, 0);
        }
        group->lig_rc = rc;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        RETURN(ret_bytes);
}

struct llu_io_group * get_io_group(struct inode *inode, int maxpages,
                                   struct lustre_rw_params *params)
{
        struct llu_io_group *group;

        OBD_ALLOC_PTR(group);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->lig_params = params;

        return group;
}

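/*
 * Upper bound on the number of pages an io of `len' bytes spread over
 * `iovlen' iovec segments can touch: len / CFS_PAGE_SIZE full pages,
 * rounded up, plus up to two extra partial pages at an unaligned head and
 * tail, plus up to (iovlen - 1) extra partials where segment boundaries
 * fall inside a page.
 */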
static int max_io_pages(ssize_t len, int iovlen)
{
        return (((len + CFS_PAGE_SIZE - 1) / CFS_PAGE_SIZE) + 2 + iovlen - 1);
}

void put_io_group(struct llu_io_group *group)
{
        OBD_FREE_PTR(group);
}

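/*
 * Typical group lifecycle, as used by slp_io_start() below (sketch):
 *
 *      iogroup = get_io_group(inode, max_io_pages(cnt, nrsegs), &params);
 *      if (IS_ERR(iogroup))
 *              RETURN(PTR_ERR(iogroup));
 *      ...queue and submit io with llu_queue_pio()...
 *      put_io_group(iogroup);
 */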
/**
 * Returns true if \a io is a normal io; false for sendfile() /
 * splice_{read|write}().
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
        return 1;
}

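/*
 * Main read/write entry point: pick the target range and lock mode,
 * allocate an io group sized by max_io_pages(), validate each iovec
 * segment against the file size and lli_maxbytes, and feed the segments
 * to llu_queue_pio() until the byte count is drained.
 */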
static int slp_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        int err, ret;
        loff_t pos;
        long cnt;
        struct llu_io_group *iogroup;
        struct lustre_rw_params p = {0};
        int iovidx;
        struct intnl_stat *st = llu_i2stat(inode);
        struct llu_inode_info *lli = llu_i2info(inode);
        struct llu_io_session *session = cl2slp_io(env, ios)->sio_session;
        int write = io->ci_type == CIT_WRITE;
        int exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        if (write) {
                pos = io->u.ci_wr.wr.crw_pos;
                cnt = io->u.ci_wr.wr.crw_count;
        } else {
                pos = io->u.ci_rd.rd.crw_pos;
                cnt = io->u.ci_rd.rd.crw_count;
        }
        if (io->u.ci_wr.wr_append) {
                p.lrp_lock_mode = LCK_PW;
        } else {
                p.lrp_brw_flags = OBD_BRW_SRVLOCK;
                p.lrp_lock_mode = LCK_NL;
        }

        iogroup = get_io_group(inode, max_io_pages(cnt, cio->cui_nrsegs), &p);
        if (IS_ERR(iogroup))
                RETURN(PTR_ERR(iogroup));

        err = ccc_prep_size(env, obj, io, pos, cnt, 0, &exceed);
        if (err != 0 || (write == 0 && exceed != 0))
                GOTO(out, err);

        CDEBUG(D_INODE,
               "%s ino %lu, %lu bytes, offset %lld, i_size %llu\n",
               write ? "Write" : "Read", (unsigned long)st->st_ino,
               cnt, (__u64)pos, (__u64)st->st_size);

        if (write && io->u.ci_wr.wr_append)
                pos = io->u.ci_wr.wr.crw_pos = st->st_size; /* XXX? Do we need to change io content too here? */
        /* XXX What if one write syscall writes at two different offsets? */

        for (iovidx = 0; iovidx < cio->cui_nrsegs; iovidx++) {
                char *buf = (char *) cio->cui_iov[iovidx].iov_base;
                long count = cio->cui_iov[iovidx].iov_len;

                if (!count)
                        continue;
                if (cnt < count)
                        count = cnt;
                if (IS_BAD_PTR(buf) || IS_BAD_PTR(buf + count)) {
                        GOTO(out, err = -EFAULT);
                }

                if (io->ci_type == CIT_READ) {
                        if (/* local_lock && */ pos >= st->st_size)
                                break;
                } else if (io->ci_type == CIT_WRITE) {
                        if (pos >= lli->lli_maxbytes) {
                                GOTO(out, err = -EFBIG);
                        }
                        if (pos + count >= lli->lli_maxbytes)
                                count = lli->lli_maxbytes - pos;
                }

                ret = llu_queue_pio(env, io, iogroup, buf, count, pos);
                if (ret < 0) {
                        GOTO(out, err = ret);
                } else {
                        io->ci_nob += ret;
                        pos += ret;
                        cnt -= ret;
                        if (io->ci_type == CIT_WRITE) {
                                /* obd_adjust_kms(exp, lsm, pos, 0); XXX */
                                if (pos > st->st_size)
                                        st->st_size = pos;
                        }
                        if (!cnt)
                                break;
                }
        }
        LASSERT(cnt == 0 || io->ci_type == CIT_READ); /* libsysio should guarantee this */

        if (!iogroup->lig_rc)
                session->lis_rwcount += iogroup->lig_rwcount;
        else if (!session->lis_rc)
                session->lis_rc = iogroup->lig_rc;
        err = 0;

out:
        put_io_group(iogroup);
        return err;
}

static const struct cl_io_operations ccc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini    = ccc_io_fini,
                        .cio_lock    = slp_io_rw_lock,
                        .cio_start   = slp_io_start,
                        .cio_end     = ccc_io_end,
                        .cio_advance = ccc_io_advance
                },
                [CIT_WRITE] = {
                        .cio_fini    = ccc_io_fini,
                        .cio_lock    = slp_io_rw_lock,
                        .cio_start   = slp_io_start,
                        .cio_end     = ccc_io_end,
                        .cio_advance = ccc_io_advance
                },
                [CIT_SETATTR] = {
                        .cio_fini      = ccc_io_fini,
                        .cio_iter_init = slp_io_trunc_iter_init,
                        .cio_start     = slp_io_trunc_start
                },
                [CIT_MISC] = {
                        .cio_fini = ccc_io_fini
                }
        }
};

static struct slp_io *cl2slp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        /* Called only for its assertions. */
        cl2ccc_io(env, slice);

        return slp_env_io(env);
}

/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

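/*
 * Called at mount time to build the cl-device stack on top of the data
 * export's obd device; cl_sb_fini() unwinds it at umount.
 */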
int cl_sb_init(struct llu_sb_info *sbi)
{
        struct cl_device *cl;
        struct lu_env *env;
        int rc = 0;
        int refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        cl = cl_type_setup(env, NULL, &slp_device_type,
                           sbi->ll_dt_exp->exp_obd->obd_lu_dev);
        if (IS_ERR(cl))
                GOTO(out, rc = PTR_ERR(cl));

        sbi->ll_cl = cl;
        sbi->ll_site = cl2lu_dev(cl)->ld_site;
out:
        cl_env_put(env, &refcheck);
        RETURN(rc);
}

int cl_sb_fini(struct llu_sb_info *sbi)
{
        struct lu_env *env;
        int refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (sbi->ll_cl != NULL) {
                cl_stack_fini(env, sbi->ll_cl);
                sbi->ll_cl = NULL;
                sbi->ll_site = NULL;
        }
        cl_env_put(env, &refcheck);
        /*
         * If mount failed (sbi->ll_cl == NULL) and there are no other
         * mounts, stop device types manually (this usually happens
         * automatically when the last device is destroyed).
         */
        lu_types_stop();
        cl_env_cache_purge(~0);
        RETURN(0);
}