- clear_bit(LLI_F_HAVE_SIZE_LOCK, &(llu_i2info(inode)->lli_flags));
-#if 0
- struct ldlm_extent *extent = &lock->l_extent;
- unsigned long start, end, count, skip, i, j;
- struct page *page;
- int ret;
- ENTRY;
-
- CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
- inode->i_ino, inode, extent->start, extent->end, inode->i_size);
-
- start = extent->start >> PAGE_CACHE_SHIFT;
- count = ~0;
- skip = 0;
- end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
- if ((end << PAGE_CACHE_SHIFT) < extent->end)
- end = ~0;
- if (lsm->lsm_stripe_count > 1) {
- struct {
- char name[16];
- struct ldlm_lock *lock;
- struct lov_stripe_md *lsm;
- } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
- __u32 stripe;
- __u32 vallen = sizeof(stripe);
- int rc;
-
- /* get our offset in the lov */
- rc = obd_get_info(ll_i2obdconn(inode), sizeof(key),
- &key, &vallen, &stripe);
- if (rc != 0) {
- CERROR("obd_get_info: rc = %d\n", rc);
- LBUG();
- }
- LASSERT(stripe < lsm->lsm_stripe_count);
-
- count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
- skip = (lsm->lsm_stripe_count - 1) * count;
- start += (start/count * skip) + (stripe * count);
- if (end != ~0)
- end += (end/count * skip) + (stripe * count);
- }
-
- i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
- if (end >= i)
- clear_bit(LLI_F_HAVE_SIZE_LOCK, &(ll_i2info(inode)->lli_flags));
- if (i < end)
- end = i;
-
- CDEBUG(D_INODE, "start: %lu j: %lu count: %lu skip: %lu end: %lu\n",
- start, start % count, count, skip, end);
-
- /* start writeback on dirty pages in the extent when its PW */
- for (i = start, j = start % count;
- lock->l_granted_mode == LCK_PW && i < end; j++, i++) {
- if (j == count) {
- i += skip;
- j = 0;
- }
- /* its unlikely, but give us a chance to bail when we're out */
- PGCACHE_WRLOCK(inode->i_mapping);
- if (list_empty(&inode->i_mapping->dirty_pages)) {
- CDEBUG(D_INODE, "dirty list empty\n");
- PGCACHE_WRUNLOCK(inode->i_mapping);
- break;
- }
- PGCACHE_WRUNLOCK(inode->i_mapping);
-
- if (need_resched())
- schedule();
-
- /* always do a getattr for the first person to pop out of lock
- * acquisition.. the DID_GETATTR flag and semaphore serialize
- * this initial race. we used to make a decision based on whether
- * the lock was matched or acquired, but the matcher could win the
- * waking race with the first issuer so that was no good..
- */
- if (test_bit(LLI_F_DID_GETATTR, &lli->lli_flags))
- RETURN(ELDLM_OK);
-
- down(&lli->lli_getattr_sem);
-
- if (!test_bit(LLI_F_DID_GETATTR, &lli->lli_flags)) {
- rc = ll_inode_getattr(inode, lsm);
- if (rc == 0) {
- set_bit(LLI_F_DID_GETATTR, &lli->lli_flags);
- } else {
- unlock_page(page);
- }
- page_cache_release(page);
-
- }
-
- /* our locks are page granular thanks to osc_enqueue, we invalidate the
- * whole page. */
- LASSERT((extent->start & ~PAGE_CACHE_MASK) == 0);
- LASSERT(((extent->end+1) & ~PAGE_CACHE_MASK) == 0);
- for (i = start, j = start % count ; i < end ; j++, i++) {
- if ( j == count ) {
- i += skip;
- j = 0;
- }
- PGCACHE_WRLOCK(inode->i_mapping);
- if (list_empty(&inode->i_mapping->dirty_pages) &&
- list_empty(&inode->i_mapping->clean_pages) &&
- list_empty(&inode->i_mapping->locked_pages)) {
- CDEBUG(D_INODE, "nothing left\n");
- PGCACHE_WRUNLOCK(inode->i_mapping);
- break;
- }
- PGCACHE_WRUNLOCK(inode->i_mapping);
- if (need_resched())
- schedule();
- page = find_get_page(inode->i_mapping, i);
- if (page == NULL)
- continue;
- CDEBUG(D_INODE, "dropping page %p at %lu\n", page, page->index);
- lock_page(page);
- if (page->mapping) /* might have raced */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(page);
-#else
- truncate_complete_page(page->mapping, page);
-#endif
- unlock_page(page);
- page_cache_release(page);
- }
- EXIT;
-#endif
+ /* Merge attribute sources for this inode: timestamps cached from the
+  * MDS (lli_lvb) are pushed into the inode, then the cl_object's
+  * attributes (OST-side state) are folded in, keeping the newest of
+  * each timestamp, and the result is copied into the intnl_stat. */
+ struct llu_inode_info *lli = llu_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct intnl_stat *st = llu_i2stat(inode);
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct ost_lvb lvb;
+ int rc;
+ ENTRY;
+
+ /* merge timestamps the most recently obtained from mds with
+ timestamps obtained from osts */
+ LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
+
+ /* seed lvb from the inode's current state before merging in the
+  * cl_object attributes below */
+ inode_init_lvb(inode, &lvb);
+
+ /* snapshot the cl_object attributes under its attr lock; on failure
+  * the stat fields are left untouched and rc is propagated */
+ cl_object_attr_lock(obj);
+ rc = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (rc == 0) {
+ /* keep the newest of each timestamp (MDS seed vs. OST attr) */
+ if (lvb.lvb_atime < attr->cat_atime)
+ lvb.lvb_atime = attr->cat_atime;
+ if (lvb.lvb_ctime < attr->cat_ctime)
+ lvb.lvb_ctime = attr->cat_ctime;
+ if (lvb.lvb_mtime < attr->cat_mtime)
+ lvb.lvb_mtime = attr->cat_mtime;
+
+ /* NOTE(review): lvb_size/lvb_blocks still hold the values seeded by
+  * inode_init_lvb(); attr->cat_size/cat_blocks are fetched but never
+  * merged into lvb here -- confirm that is intended. */
+ st->st_size = lvb.lvb_size;
+ st->st_blocks = lvb.lvb_blocks;
+ st->st_mtime = lvb.lvb_mtime;
+ st->st_atime = lvb.lvb_atime;
+ st->st_ctime = lvb.lvb_ctime;
+ }
+
+ RETURN(rc);