* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* This is a new object to be allocated, or the file
* corresponding to the object does not exist. */
LOC_F_NEW = 0x00000001,
+
+ /* When a dying object is found, return -EAGAIN at once instead of
+ * blocking the thread. */
+ LOC_F_NOWAIT = 0x00000002,
} loc_flags_t;
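
/*
 * Usage sketch (hypothetical caller, not part of this patch; assumes the
 * loc_flags member of struct lu_object_conf): a lookup that must not block
 * on a dying object sets LOC_F_NOWAIT and treats -EAGAIN as "retry later":
 *
 *	struct lu_object_conf conf = { .loc_flags = LOC_F_NOWAIT };
 *	struct lu_object *o = lu_object_find(env, dev, fid, &conf);
 *
 *	if (IS_ERR(o) && PTR_ERR(o) == -EAGAIN)
 *		goto retry_later;
 */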
/**
* Link the device to the site.
**/
- cfs_list_t ld_linkage;
+ struct list_head ld_linkage;
};
struct lu_device_type_operations;
struct lu_attr {
/** size in bytes */
__u64 la_size;
- /** modification time in seconds since Epoch */
- obd_time la_mtime;
- /** access time in seconds since Epoch */
- obd_time la_atime;
- /** change time in seconds since Epoch */
- obd_time la_ctime;
+ /** modification time in seconds since Epoch */
+ s64 la_mtime;
+ /** access time in seconds since Epoch */
+ s64 la_atime;
+ /** change time in seconds since Epoch */
+ s64 la_ctime;
/** 512-byte blocks allocated to object */
__u64 la_blocks;
/** bit-mask of valid attributes, from enum la_valid */
__u64 la_valid;
};
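+/**
+ * Convert \a src_attr from CPU to little-endian byte order, storing the
+ * result in \a dst_attr, so attributes can be kept in an
+ * endian-independent on-disk format.
+ */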
+static inline void lu_attr_cpu_to_le(struct lu_attr *dst_attr,
+ struct lu_attr *src_attr)
+{
+ dst_attr->la_size = cpu_to_le64(src_attr->la_size);
+ dst_attr->la_mtime = cpu_to_le64(src_attr->la_mtime);
+ dst_attr->la_atime = cpu_to_le64(src_attr->la_atime);
+ dst_attr->la_ctime = cpu_to_le64(src_attr->la_ctime);
+ dst_attr->la_blocks = cpu_to_le64(src_attr->la_blocks);
+ dst_attr->la_mode = cpu_to_le32(src_attr->la_mode);
+ dst_attr->la_uid = cpu_to_le32(src_attr->la_uid);
+ dst_attr->la_gid = cpu_to_le32(src_attr->la_gid);
+ dst_attr->la_flags = cpu_to_le32(src_attr->la_flags);
+ dst_attr->la_nlink = cpu_to_le32(src_attr->la_nlink);
+ dst_attr->la_blkbits = cpu_to_le32(src_attr->la_blkbits);
+ dst_attr->la_blksize = cpu_to_le32(src_attr->la_blksize);
+ dst_attr->la_rdev = cpu_to_le32(src_attr->la_rdev);
+ dst_attr->la_valid = cpu_to_le64(src_attr->la_valid);
+}
+
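+/**
+ * Convert \a src_attr from little-endian to CPU byte order, storing the
+ * result in \a dst_attr; the inverse of lu_attr_cpu_to_le().
+ */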
+static inline void lu_attr_le_to_cpu(struct lu_attr *dst_attr,
+ struct lu_attr *src_attr)
+{
+ dst_attr->la_size = le64_to_cpu(src_attr->la_size);
+ dst_attr->la_mtime = le64_to_cpu(src_attr->la_mtime);
+ dst_attr->la_atime = le64_to_cpu(src_attr->la_atime);
+ dst_attr->la_ctime = le64_to_cpu(src_attr->la_ctime);
+ dst_attr->la_blocks = le64_to_cpu(src_attr->la_blocks);
+ dst_attr->la_mode = le32_to_cpu(src_attr->la_mode);
+ dst_attr->la_uid = le32_to_cpu(src_attr->la_uid);
+ dst_attr->la_gid = le32_to_cpu(src_attr->la_gid);
+ dst_attr->la_flags = le32_to_cpu(src_attr->la_flags);
+ dst_attr->la_nlink = le32_to_cpu(src_attr->la_nlink);
+ dst_attr->la_blkbits = le32_to_cpu(src_attr->la_blkbits);
+ dst_attr->la_blksize = le32_to_cpu(src_attr->la_blksize);
+ dst_attr->la_rdev = le32_to_cpu(src_attr->la_rdev);
+ dst_attr->la_valid = le64_to_cpu(src_attr->la_valid);
+}
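+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): convert
+ * before writing and after reading so the stored attributes do not
+ * depend on host endianness:
+ *
+ *	struct lu_attr le;
+ *
+ *	lu_attr_cpu_to_le(&le, attr);	before 'le' is written out
+ *	lu_attr_le_to_cpu(attr, &le);	after 'le' is read back
+ */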
+
/** Bit-mask of valid attributes */
enum la_valid {
LA_ATIME = 1 << 0,
/**
* Linkage into list of all layers.
*/
- cfs_list_t lo_linkage;
+ struct list_head lo_linkage;
/**
* Link to the device, for debugging.
*/
*/
struct lu_object_header {
/**
+ * Fid, uniquely identifying this object.
+ */
+ struct lu_fid loh_fid;
+ /**
* Object flags from enum lu_object_header_flags. Set and checked
* atomically.
*/
*/
atomic_t loh_ref;
/**
- * Fid, uniquely identifying this object.
- */
- struct lu_fid loh_fid;
- /**
* Common object attributes, cached for efficiency. From enum
* lu_object_header_attr.
*/
/**
* Linkage into per-site hash table. Protected by lu_site::ls_guard.
*/
- cfs_hlist_node_t loh_hash;
+ struct hlist_node loh_hash;
/**
* Linkage into per-site LRU list. Protected by lu_site::ls_guard.
*/
- cfs_list_t loh_lru;
+ struct list_head loh_lru;
/**
* Linkage into the list of layers. Never modified once set (except late
* in object destruction). No locking is necessary.
*/
- cfs_list_t loh_layers;
+ struct list_head loh_layers;
/**
* A list of references to this object, for debugging.
*/
struct lu_site_bkt_data {
/**
- * number of busy object on this bucket
+ * number of objects in this bucket on the lsb_lru list
*/
- long lsb_busy;
+ long lsb_lru_len;
/**
* LRU list, updated on each access to object. Protected by
* bucket lock of lu_site::ls_obj_hash.
* moved to lu_site::ls_lru.prev (because there is no
* list_for_each_entry_safe_reverse()).
*/
- cfs_list_t lsb_lru;
+ struct list_head lsb_lru;
/**
* Wait-queue signaled when an object in this site is ultimately
* destroyed (lu_object_free()). It is used by lu_object_find() to
*
* \see htable_lookup().
*/
- wait_queue_head_t lsb_marche_funebre;
+ wait_queue_head_t lsb_marche_funebre;
};
enum {
/**
* objects hash table
*/
- cfs_hash_t *ls_obj_hash;
+ cfs_hash_t *ls_obj_hash;
/**
* index of the hash-table bucket at which purging resumes
*/
- int ls_purge_start;
- /**
- * Top-level device for this stack.
- */
- struct lu_device *ls_top_dev;
+ unsigned int ls_purge_start;
+ /**
+ * Top-level device for this stack.
+ */
+ struct lu_device *ls_top_dev;
/**
* Bottom-level device for this stack
*/
struct lu_device *ls_bottom_dev;
- /**
- * Linkage into global list of sites.
- */
- cfs_list_t ls_linkage;
- /**
- * List for lu device for this site, protected
- * by ls_ld_lock.
- **/
- cfs_list_t ls_ld_linkage;
+ /**
+ * Linkage into global list of sites.
+ */
+ struct list_head ls_linkage;
+ /**
+ * List of lu_devices for this site, protected
+ * by ls_ld_lock.
+ **/
+ struct list_head ls_ld_linkage;
spinlock_t ls_ld_lock;
-
+ /**
+ * Lock to serialize site purge.
+ */
+ struct mutex ls_purge_mutex;
/**
* lu_site stats
*/
* XXX: a hack! fld has to find md_site via site, remove when possible
*/
struct seq_server_site *ld_seq_site;
+ /**
+ * Pointer to the lu_target for this site.
+ */
+ struct lu_target *ls_tgt;
};
static inline struct lu_site_bkt_data *
struct lu_device *dev,
const struct lu_fid *f,
const struct lu_object_conf *conf);
-void lu_object_purge(const struct lu_env *env, struct lu_device *dev,
- const struct lu_fid *f);
struct lu_object *lu_object_find_slice(const struct lu_env *env,
struct lu_device *dev,
const struct lu_fid *f,
*/
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
- LASSERT(!cfs_list_empty(&h->loh_layers));
- return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
+ LASSERT(!list_empty(&h->loh_layers));
+ return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}
/**
* Pointer to an array with key values. Internal implementation
* detail.
*/
- void **lc_value;
- /**
- * Linkage into a list of all remembered contexts. Only
- * `non-transient' contexts, i.e., ones created for service threads
- * are placed here.
- */
- cfs_list_t lc_remember;
- /**
- * Version counter used to skip calls to lu_context_refill() when no
- * keys were registered.
- */
- unsigned lc_version;
+ void **lc_value;
+ /**
+ * Linkage into a list of all remembered contexts. Only
+ * `non-transient' contexts, i.e., ones created for service threads
+ * are placed here.
+ */
+ struct list_head lc_remember;
+ /**
+ * Version counter used to skip calls to lu_context_refill() when no
+ * keys were registered.
+ */
+ unsigned lc_version;
/**
* Debugging cookie.
*/
- unsigned lc_cookie;
+ unsigned lc_cookie;
};
/**
int ln_namelen;
};
+/**
+ * Validate names (path components)
+ *
+ * To be valid, \a name must be non-empty, NUL-terminated, of length \a
+ * name_len, and must not contain '/'. The maximum length of a name
+ * (beyond which -ENAMETOOLONG is returned) is enforced by llite and the
+ * server; here we only reject lengths that indicate broken integer
+ * handling.
+ */
+static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
+{
+ return name != NULL &&
+ name_len > 0 &&
+ name_len < INT_MAX &&
+ name[name_len] == '\0' &&
+ strlen(name) == name_len &&
+ memchr(name, '/', name_len) == NULL;
+}
+
static inline bool lu_name_is_valid(const struct lu_name *ln)
{
- return ln->ln_name != NULL &&
- ln->ln_namelen > 0 &&
- ln->ln_name[0] != '\0' &&
- ln->ln_name[ln->ln_namelen] == '\0';
+ return lu_name_is_valid_2(ln->ln_name, ln->ln_namelen);
}
#define DNAME "%.*s"
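
/*
 * Printing sketch (assumes the companion PNAME macro, which expands to
 * the ln_namelen/ln_name pair):
 *
 *	CDEBUG(D_INODE, "name: "DNAME"\n", PNAME(ln));
 */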
* methods.
*/
struct lu_buf {
- void *lb_buf;
- ssize_t lb_len;
+ void *lb_buf;
+ size_t lb_len;
};
#define DLUBUF "(%p %zu)"
extern struct lu_buf LU_BUF_NULL;
void lu_buf_free(struct lu_buf *buf);
-void lu_buf_alloc(struct lu_buf *buf, int size);
-void lu_buf_realloc(struct lu_buf *buf, int size);
+void lu_buf_alloc(struct lu_buf *buf, size_t size);
+void lu_buf_realloc(struct lu_buf *buf, size_t size);

-int lu_buf_check_and_grow(struct lu_buf *buf, int len);
-struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len);
+int lu_buf_check_and_grow(struct lu_buf *buf, size_t len);
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len);
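+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): a lu_buf
+ * starts empty, grows on demand, and is freed once no matter how often
+ * it was reallocated:
+ *
+ *	struct lu_buf buf = { NULL, 0 };
+ *
+ *	lu_buf_alloc(&buf, 4096);
+ *	if (buf.lb_buf == NULL)
+ *		return -ENOMEM;
+ *	if (lu_buf_check_and_grow(&buf, 8192) != 0)
+ *		return -ENOMEM;
+ *	lu_buf_free(&buf);
+ */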
+extern __u32 lu_context_tags_default;
+extern __u32 lu_session_tags_default;
+
+static inline bool lu_device_is_cl(const struct lu_device *d)
+{
+ return d->ld_type->ldt_tags & LU_DEVICE_CL;
+}
+
+static inline bool lu_object_is_cl(const struct lu_object *o)
+{
+ return lu_device_is_cl(o->lo_dev);
+}
/** @} lu */
#endif /* __LUSTRE_LU_OBJECT_H */