Diffstat (limited to 'fs')
175 files changed, 5948 insertions, 5445 deletions
diff --git a/fs/9p/mux.c b/fs/9p/mux.c index ea1134eb47c8..8e8356c1c229 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c @@ -31,6 +31,7 @@ #include <linux/poll.h> #include <linux/kthread.h> #include <linux/idr.h> +#include <linux/mutex.h> #include "debug.h" #include "v9fs.h" @@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16); -static DECLARE_MUTEX(v9fs_mux_task_lock); +static DEFINE_MUTEX(v9fs_mux_task_lock); static struct workqueue_struct *v9fs_mux_wq; static int v9fs_mux_num; @@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num, v9fs_mux_poll_task_num); - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1); if (n > v9fs_mux_poll_task_num) { @@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) } v9fs_mux_num++; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); return 0; } @@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) int i; struct v9fs_mux_poll_task *vpt; - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); vpt = m->poll_task; list_del(&m->mux_list); for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { @@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) v9fs_mux_poll_task_num--; } v9fs_mux_num--; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); } /** diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 3ad8455f8577..651a9e14d9a9 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -614,6 +614,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, sb = dir->i_sb; v9ses = v9fs_inode2v9ses(dir); + dentry->d_op = &v9fs_dentry_operations; dirfid = v9fs_fid_lookup(dentry->d_parent); if (!dirfid) { @@ -681,8 +682,6 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, goto FreeFcall; fid->qid = fcall->params.rstat.stat.qid; - - dentry->d_op = &v9fs_dentry_operations; v9fs_stat2inode(&fcall->params.rstat.stat, inode, inode->i_sb); d_add(dentry, inode); diff --git a/fs/Kconfig b/fs/Kconfig index 9a4158f2a3ac..e207be68d4ca 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -859,18 +859,6 @@ config RAMFS To compile this as a module, choose M here: the module will be called ramfs. -config RELAYFS_FS - tristate "Relayfs file system support" - ---help--- - Relayfs is a high-speed data relay filesystem designed to provide - an efficient mechanism for tools and facilities to relay large - amounts of data from kernel space to user space. - - To compile this code as a module, choose M here: the module will be - called relayfs. - - If unsure, say N. 
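The 9p/mux.c hunks above show the conversion pattern repeated throughout this series: a semaphore declared only to serve as a mutex becomes a real struct mutex, and the lock/unlock calls change with it. Note that the old mux.c code called up() where the lock is taken and down() where it is released; the conversion maps those to mutex_lock() and mutex_unlock(). A minimal sketch of the pattern, with a hypothetical example_lock and helper:

	#include <linux/mutex.h>

	/* was: static DECLARE_MUTEX(example_lock); */
	static DEFINE_MUTEX(example_lock);

	static void example_touch_shared_state(void)
	{
		mutex_lock(&example_lock);	/* was: down(&example_lock) */
		/* ... modify state shared between contexts ... */
		mutex_unlock(&example_lock);	/* was: up(&example_lock) */
	}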
- config CONFIGFS_FS tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)" depends on EXPERIMENTAL diff --git a/fs/Makefile b/fs/Makefile index 1db711319c80..080b3867be4d 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -91,7 +91,6 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ obj-$(CONFIG_UDF_FS) += udf/ -obj-$(CONFIG_RELAYFS_FS) += relayfs/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_JFS_FS) += jfs/ obj-$(CONFIG_XFS_FS) += xfs/ diff --git a/fs/adfs/file.c b/fs/adfs/file.c index afebbfde6968..6af10885f9d6 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -19,11 +19,7 @@ * * adfs regular file handling primitives */ -#include <linux/errno.h> #include <linux/fs.h> -#include <linux/fcntl.h> -#include <linux/time.h> -#include <linux/stat.h> #include <linux/buffer_head.h> /* for file_fsync() */ #include <linux/adfs_fs.h> diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 385bed09b0d8..f54c5b21f876 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -13,6 +13,7 @@ /* Internal header file for autofs */ #include <linux/auto_fs4.h> +#include <linux/mutex.h> #include <linux/list.h> /* This is the range of ioctl() numbers we claim as ours */ @@ -102,7 +103,7 @@ struct autofs_sb_info { int reghost_enabled; int needs_reghost; struct super_block *sb; - struct semaphore wq_sem; + struct mutex wq_mutex; spinlock_t fs_lock; struct autofs_wait_queue *queues; /* Wait queue pointer */ }; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 2d3082854a29..1ad98d48e550 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; - init_MUTEX(&sbi->wq_sem); + mutex_init(&sbi->wq_mutex); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; s->s_blocksize = 1024; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 394ff36ef8f1..be78e9378c03 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, return -ENOENT; } - if (down_interruptible(&sbi->wq_sem)) { + if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } @@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* Can't wait for an expire if there's no mount */ if (notify == NFY_NONE && !d_mountpoint(dentry)) { kfree(name); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -ENOENT; } @@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); if ( !wq ) { kfree(name); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } @@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq->status = -EINTR; /* Status return if interrupted */ atomic_set(&wq->wait_ctr, 2); atomic_set(&wq->notified, 1); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); } else { atomic_inc(&wq->wait_ctr); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(name); DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); @@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok { struct autofs_wait_queue *wq, **wql; - down(&sbi->wq_sem); + mutex_lock(&sbi->wq_mutex); for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { if ( 
wq->wait_queue_token == wait_queue_token ) break; } if ( !wq ) { - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(wq->name); wq->name = NULL; /* Do not wait on this queue */ diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 2d365cb8eec6..dd6048ce0532 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -561,7 +561,7 @@ befs_utf2nls(struct super_block *sb, const char *in, * @sb: Superblock * @src: Input string buffer in NLS format * @srclen: Length of input string in bytes - * @dest: The output string in UTF8 format + * @dest: The output string in UTF-8 format * @destlen: Length of the output buffer * * Converts input string @src, which is in the format of the loaded NLS map, @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/mempool.h> #include <linux/workqueue.h> +#include <linux/blktrace_api.h> #include <scsi/sg.h> /* for struct sg_iovec */ #define BIO_POOL_SIZE 256 @@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) if (!bp) return bp; + blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi, + bi->bi_sector + first_sectors); + BUG_ON(bi->bi_vcnt != 1); BUG_ON(bi->bi_idx != 0); atomic_set(&bp->cnt, 3); @@ -1243,11 +1247,11 @@ static int __init init_bio(void) scale = 4; /* - * scale number of entries + * Limit number of entries reserved -- mempools are only used when + * the system is completely unable to allocate memory, so we only + * need enough to make progress. */ - bvec_pool_entries = megabytes * 2; - if (bvec_pool_entries > 256) - bvec_pool_entries = 256; + bvec_pool_entries = 1 + scale; fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); if (!fs_bio_set) diff --git a/fs/block_dev.c b/fs/block_dev.c index 6e50346fb1ee..44d05e6e34db 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) SLAB_CTOR_CONSTRUCTOR) { memset(bdev, 0, sizeof(*bdev)); - sema_init(&bdev->bd_sem, 1); - sema_init(&bdev->bd_mount_sem, 1); + mutex_init(&bdev->bd_mutex); + mutex_init(&bdev->bd_mount_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); inode_init_once(&ei->vfs_inode); @@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file) if (ret) goto out_first; bdev->bd_contains = whole; - down(&whole->bd_sem); + mutex_lock(&whole->bd_mutex); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = whole->bd_inode->i_data.backing_dev_info; if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { whole->bd_part_count--; - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); ret = -ENXIO; goto out_first; } kobject_get(&p->kobj); bdev->bd_part = p; bd_set_size(bdev, (loff_t) p->nr_sects << 9); - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); } } else { put_disk(disk); @@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count++; - up(&bdev->bd_contains->bd_sem); 
+ mutex_unlock(&bdev->bd_contains->bd_mutex); } } bdev->bd_openers++; - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); return 0; @@ -652,7 +652,7 @@ out_first: put_disk(disk); module_put(owner); out: - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); if (ret) bdput(bdev); @@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev) struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count--; - up(&bdev->bd_contains->bd_sem); + mutex_unlock(&bdev->bd_contains->bd_mutex); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; @@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev) bdev->bd_contains = NULL; } unlock_kernel(); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index a9b399402007..0d6ca7bac6c8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev) * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * - * This takes the block device bd_mount_sem to make sure no new mounts + * This takes the block device bd_mount_mutex to make sure no new mounts * happen on bdev until thaw_bdev() is called. * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. @@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) { struct super_block *sb; - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); sb = get_super(bdev); if (sb && !(sb->s_flags & MS_RDONLY)) { sb->s_frozen = SB_FREEZE_WRITE; @@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) drop_super(sb); } - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); } EXPORT_SYMBOL(thaw_bdev); @@ -3051,68 +3051,6 @@ asmlinkage long sys_bdflush(int func, long data) } /* - * Migration function for pages with buffers. This function can only be used - * if the underlying filesystem guarantees that no other references to "page" - * exist. 
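The autofs4 hunks earlier in this diff also convert an interruptible acquisition. Like down_interruptible(), mutex_lock_interruptible() returns nonzero when a signal arrives before the lock is taken, so the error path must undo any work done beforehand. A sketch of that shape, modeled on autofs4_wait() (sbi and name as in that function):

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(name);		/* undo allocation done before locking */
		return -EINTR;		/* signal arrived; report interruption */
	}
	/* ... wait-queue manipulation under wq_mutex ... */
	mutex_unlock(&sbi->wq_mutex);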
- */ -#ifdef CONFIG_MIGRATION -int buffer_migrate_page(struct page *newpage, struct page *page) -{ - struct address_space *mapping = page->mapping; - struct buffer_head *bh, *head; - int rc; - - if (!mapping) - return -EAGAIN; - - if (!page_has_buffers(page)) - return migrate_page(newpage, page); - - head = page_buffers(page); - - rc = migrate_page_remove_references(newpage, page, 3); - if (rc) - return rc; - - bh = head; - do { - get_bh(bh); - lock_buffer(bh); - bh = bh->b_this_page; - - } while (bh != head); - - ClearPagePrivate(page); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - get_page(newpage); - - bh = head; - do { - set_bh_page(bh, newpage, bh_offset(bh)); - bh = bh->b_this_page; - - } while (bh != head); - - SetPagePrivate(newpage); - - migrate_page_copy(newpage, page); - - bh = head; - do { - unlock_buffer(bh); - put_bh(bh); - bh = bh->b_this_page; - - } while (bh != head); - - return 0; -} -EXPORT_SYMBOL(buffer_migrate_page); -#endif - -/* * Buffer-head allocation */ static kmem_cache_t *bh_cachep; diff --git a/fs/char_dev.c b/fs/char_dev.c index 21195c481637..5c36345c9bf7 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -19,6 +19,7 @@ #include <linux/kobject.h> #include <linux/kobj_map.h> #include <linux/cdev.h> +#include <linux/mutex.h> #ifdef CONFIG_KMOD #include <linux/kmod.h> @@ -28,7 +29,7 @@ static struct kobj_map *cdev_map; #define MAX_PROBE_HASH 255 /* random */ -static DECLARE_MUTEX(chrdevs_lock); +static DEFINE_MUTEX(chrdevs_lock); static struct char_device_struct { struct char_device_struct *next; @@ -88,13 +89,13 @@ out: void *acquire_chrdev_list(void) { - down(&chrdevs_lock); + mutex_lock(&chrdevs_lock); return get_next_chrdev(NULL); } void release_chrdev_list(void *dev) { - up(&chrdevs_lock); + mutex_unlock(&chrdevs_lock); kfree(dev); } @@ -151,7 +152,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor, memset(cd, 0, sizeof(struct char_device_struct)); - down(&chrdevs_lock); + mutex_lock(&chrdevs_lock); /* temporary */ if (major == 0) { @@ -186,10 +187,10 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor, } cd->next = *cp; *cp = cd; - up(&chrdevs_lock); + mutex_unlock(&chrdevs_lock); return cd; out: - up(&chrdevs_lock); + mutex_unlock(&chrdevs_lock); kfree(cd); return ERR_PTR(ret); } @@ -200,7 +201,7 @@ __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct) struct char_device_struct *cd = NULL, **cp; int i = major_to_index(major); - down(&chrdevs_lock); + mutex_lock(&chrdevs_lock); for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) if ((*cp)->major == major && (*cp)->baseminor == baseminor && @@ -210,7 +211,7 @@ __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct) cd = *cp; *cp = cd->next; } - up(&chrdevs_lock); + mutex_unlock(&chrdevs_lock); return cd; } diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index d335015473a5..cb68efba35db 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -160,7 +160,7 @@ improperly zeroed buffer in CIFS Unix extensions set times call. Version 1.25 ------------ Fix internationalization problem in cifs readdir with filenames that map to -longer UTF8 strings than the string on the wire was in Unicode. Add workaround +longer UTF-8 strings than the string on the wire was in Unicode. Add workaround for readdir to netapp servers. Fix search rewind (seek into readdir to return non-consecutive entries). Do not do readdir when server negotiates buffer size to small to fit filename. 
Add support for reading POSIX ACLs from diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index fed55e3c53df..632561dd9c50 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) rc = -ENOMEM; else if (pTcon->ses->capabilities & CAP_UNIX) { diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c index a7a47bb36bf3..ec4dfe9bf5ef 100644 --- a/fs/cifs/fcntl.c +++ b/fs/cifs/fcntl.c @@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg) cifs_sb = CIFS_SB(file->f_dentry->d_sb); pTcon = cifs_sb->tcon; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { rc = -ENOMEM; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 675bd2568297..165d67426381 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file) } } - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 59359911f481..ff93a9f81d1c 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) /* Unlink can be called from rename so we can not grab the sem here since we deadlock otherwise */ -/* down(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/ if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { 
FreeXid(xid); return -ENOMEM; @@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) rc = 0; } - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 0f99aae33162..8d0da7c87c7b 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, /* No need to check for cross device links since server will do that BB note DFS case in future though (when we may have to check) */ - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); fromName = build_path_from_dentry(old_file); toName = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if((fromName == NULL) || (toName == NULL)) { rc = -ENOMEM; goto cifs_hl_exit; @@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) xid = GetXid(); - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (!full_path) goto out_no_free; @@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); @@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) /* BB would it be safe against deadlock to grab this sem even though rename itself grabs the sem and calls lookup? 
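Every cifs call site converted in this series has the same shape: the per-superblock rename lock is held just long enough for build_path_from_dentry() to walk a stable dentry chain. Condensed (error handling abbreviated; most sites also call FreeXid(xid) before returning):

	mutex_lock(&sb->s_vfs_rename_mutex);	/* was: down(&sb->s_vfs_rename_sem) */
	full_path = build_path_from_dentry(direntry);
	mutex_unlock(&sb->s_vfs_rename_mutex);
	if (full_path == NULL)
		return -ENOMEM;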
*/ -/* down(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/ if(full_path == NULL) { FreeXid(xid); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 288cc048d37f..edb3b6eb34bc 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file) if(pTcon == NULL) return -EINVAL; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { return -ENOMEM; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 777e3363c2a4..3938444d87b2 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c666769a875d..7c031f00fd79 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -72,6 +72,7 @@ #include <linux/i2c-dev.h> #include <linux/wireless.h> #include <linux/atalk.h> +#include <linux/blktrace_api.h> #include <net/sock.h> /* siocdevprivate_ioctl */ #include <net/bluetooth/bluetooth.h> diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index d575452cd9f7..40c4fc973fad 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -251,3 +251,49 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_bool); +static ssize_t read_file_blob(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debugfs_blob_wrapper *blob = file->private_data; + return simple_read_from_buffer(user_buf, count, ppos, blob->data, + blob->size); +} + +static struct file_operations fops_blob = { + .read = read_file_blob, + .open = default_open, +}; + +/** + * debugfs_create_blob - create a file in the debugfs filesystem that is + * used to read 
and write a binary blob. + * + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this paramater is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer + * to the blob data and the size of the data. + * + * This function creates a file in debugfs with the given name that exports + * @blob->data as a binary blob. If the @mode variable is so set it can be + * read from. Writing is not supported. + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the debugfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, NULL will be returned. + * + * If debugfs is not enabled in the kernel, the value -ENODEV will be + * returned. It is not wise to check for this value, but rather, check for + * NULL or !NULL instead as to eliminate the need for #ifdef in the calling + * code. + */ +struct dentry *debugfs_create_blob(const char *name, mode_t mode, + struct dentry *parent, + struct debugfs_blob_wrapper *blob) +{ + return debugfs_create_file(name, mode, parent, blob, &fops_blob); +} +EXPORT_SYMBOL_GPL(debugfs_create_blob); diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index bfb8a230bac9..14c5620b5cab 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -18,6 +18,7 @@ #include <linux/mount.h> #include <linux/tty.h> #include <linux/devpts_fs.h> +#include <linux/parser.h> #define DEVPTS_SUPER_MAGIC 0x1cd1 @@ -32,39 +33,60 @@ static struct { umode_t mode; } config = {.mode = 0600}; +enum { + Opt_uid, Opt_gid, Opt_mode, + Opt_err +}; + +static match_table_t tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_mode, "mode=%o"}, + {Opt_err, NULL} +}; + static int devpts_remount(struct super_block *sb, int *flags, char *data) { - int setuid = 0; - int setgid = 0; - uid_t uid = 0; - gid_t gid = 0; - umode_t mode = 0600; - char *this_char; - - this_char = NULL; - while ((this_char = strsep(&data, ",")) != NULL) { - int n; - char dummy; - if (!*this_char) + char *p; + + config.setuid = 0; + config.setgid = 0; + config.uid = 0; + config.gid = 0; + config.mode = 0600; + + while ((p = strsep(&data, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + int option; + + if (!*p) continue; - if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) { - setuid = 1; - uid = n; - } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) { - setgid = 1; - gid = n; - } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1) - mode = n & ~S_IFMT; - else { - printk("devpts: called with bogus options\n"); + + token = match_token(p, tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.uid = option; + config.setuid = 1; + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.gid = option; + config.setgid = 1; + break; + case Opt_mode: + if (match_octal(&args[0], &option)) + return -EINVAL; + config.mode = option & ~S_IFMT; + break; + default: + printk(KERN_ERR "devpts: called with bogus options\n"); return -EINVAL; } } - config.setuid = setuid; - config.setgid = setgid; - config.uid = uid; - config.gid = gid; - config.mode = mode; return 0; } diff --git a/fs/dquot.c 
b/fs/dquot.c index 1966c890b48d..acf07e581f8c 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -103,12 +103,12 @@ * (these locking rules also apply for S_NOQUOTA flag in the inode - note that * for altering the flag i_mutex is also needed). If operation is holding * reference to dquot in other way (e.g. quotactl ops) it must be guarded by - * dqonoff_sem. + * dqonoff_mutex. * This locking assures that: * a) update/access to dquot pointers in inode is serialized * b) everyone is guarded against invalidate_dquots() * - * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced + * Each dquot has its dq_lock mutex. Locked dquots might not be referenced * from inodes (dquot_alloc_space() and such don't check the dq_lock). * Currently dquot is locked only when it is being read to memory (or space for * it is being allocated) on the first dqget() and when it is being released on @@ -118,9 +118,9 @@ * spinlock to internal buffers before writing. * * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > - * > dquot->dq_lock > dqio_sem - * i_mutex on quota files is special (it's below dqio_sem) + * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > + * dqio_mutex + * i_mutex on quota files is special (it's below dqio_mutex) */ static DEFINE_SPINLOCK(dq_list_lock); @@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot) static void wait_on_dquot(struct dquot *dquot) { - down(&dquot->dq_lock); - up(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); } #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) @@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); - down(&dqopt->dqio_sem); + mutex_lock(&dquot->dq_lock); + mutex_lock(&dqopt->dqio_mutex); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); if (ret < 0) @@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot) } set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: - up(&dqopt->dqio_sem); - up(&dquot->dq_lock); + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&dquot->dq_lock); return ret; } @@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); spin_lock(&dq_list_lock); if (!clear_dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); @@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot) ret = ret2; } out_sem: - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ if (atomic_read(&dquot->dq_count) > 1) goto out_dqlock; - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ @@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); out_dqlock: - up(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); return ret; } /* Invalidate all dquots on the list. 
Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. Also because we hold dqonoff_sem there can be no quota users - * for this sb+type at all. */ + * quota users. There can still be some users of quotas due to inodes being + * just deleted or pruned by prune_icache() (those are not attached to any + * list). We have to wait for such users. + */ static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot, *tmp; +restart: spin_lock(&dq_list_lock); list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { if (dquot->dq_sb != sb) continue; if (dquot->dq_type != type) continue; -#ifdef __DQUOT_PARANOIA - if (atomic_read(&dquot->dq_count)) - BUG(); -#endif - /* Quota now has no users and it has been written on last dqput() */ + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { + DEFINE_WAIT(wait); + + atomic_inc(&dquot->dq_count); + prepare_to_wait(&dquot->dq_wait_unused, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&dq_list_lock); + /* Once dqput() wakes us up, we know it's time to free + * the dquot. + * IMPORTANT: we rely on the fact that there is always + * at most one process waiting for dquot to free. + * Otherwise dq_count would be > 1 and we would never + * wake up. + */ + if (atomic_read(&dquot->dq_count) > 1) + schedule(); + finish_wait(&dquot->dq_wait_unused, &wait); + dqput(dquot); + /* At this moment dquot() need not exist (it could be + * reclaimed by prune_dqcache(). Hence we must + * restart. */ + goto restart; + } + /* + * Quota now has no users and it has been written on last + * dqput() + */ remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); @@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); int cnt; - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; @@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dqstats.syncs++; spin_unlock(&dq_list_lock); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; } @@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) /* * Put reference to dquot * NOTE: If you change this function please check whether dqput_blocks() works right... - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static void dqput(struct dquot *dquot) { @@ -540,6 +566,10 @@ we_slept: if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); + /* Releasing dquot during quotaoff phase? 
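The new invalidate_dquots() logic above replaces a paranoia BUG_ON with an explicit wait: if a dquot still has users, the scan takes its own reference, sleeps on dq_wait_unused until the final dqput() wakes it, then drops that reference and restarts the scan because the dquot may have been freed in the meantime. Condensed from the hunk:

	DEFINE_WAIT(wait);

	atomic_inc(&dquot->dq_count);
	prepare_to_wait(&dquot->dq_wait_unused, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&dq_list_lock);
	/* dqput() wakes us once our reference is the last one left */
	if (atomic_read(&dquot->dq_count) > 1)
		schedule();
	finish_wait(&dquot->dq_wait_unused, &wait);
	dqput(dquot);
	goto restart;	/* dquot may already be reclaimed; rescan the list */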
*/ + if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && + atomic_read(&dquot->dq_count) == 1) + wake_up(&dquot->dq_wait_unused); spin_unlock(&dq_list_lock); return; } @@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) return NODQUOT; memset((caddr_t)dquot, 0, sizeof(struct dquot)); - sema_init(&dquot->dq_lock, 1); + mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); + init_waitqueue_head(&dquot->dq_wait_unused); dquot->dq_sb = sb; dquot->dq_type = type; atomic_set(&dquot->dq_count, 1); @@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) /* * Get reference to dquot - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { @@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type) return 0; } -/* This routine is guarded by dqonoff_sem semaphore */ +/* This routine is guarded by dqonoff_mutex mutex */ static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; @@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type) { LIST_HEAD(tofree_head); - /* We need to be guarded against prune_icache to reach all the - * inodes - otherwise some can be on the local list of prune_icache */ - down(&iprune_sem); down_write(&sb_dqopt(sb)->dqptr_sem); remove_dquot_ref(sb, type, &tofree_head); up_write(&sb_dqopt(sb)->dqptr_sem); - up(&iprune_sem); put_dquot_list(&tofree_head); } @@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type) unsigned int id = 0; int cnt, ret = 0; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return 0; down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_add: inode_add_bytes(inode, number); @@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; for (cnt = 0; cnt < MAXQUOTAS; cnt++) @@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_sub: inode_sub_bytes(inode, 
number); @@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; /* Clear the arrays */ @@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type) int ret; struct quota_info *dqopt = sb_dqopt(sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); ret = dqopt->ops[type]->write_file_info(sb, type); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type) struct inode *toputinode[MAXQUOTAS]; /* We need to serialize quota_off() for device */ - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { toputinode[cnt] = NULL; if (type != -1 && cnt != type) @@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type) dqopt->info[cnt].dqi_bgrace = 0; dqopt->ops[cnt] = NULL; } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). 
*/ if (sb->s_op->sync_fs) @@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type) * changes done by userspace on the next quotaon() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (toputinode[cnt]) { - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_enabled(sb, cnt)) { @@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type) mark_inode_dirty(toputinode[cnt]); iput(toputinode[cnt]); } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); } if (sb->s_bdev) invalidate_bdev(sb->s_bdev, 0); @@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) /* And now flush the block cache so that kernel sees the changes */ invalidate_bdev(sb->s_bdev, 0); mutex_lock(&inode->i_mutex); - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); if (sb_has_quota_enabled(sb, type)) { error = -EBUSY; goto out_lock; @@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) dqopt->ops[type] = fmt->qf_ops; dqopt->info[type].dqi_format = fmt; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); goto out_file_init; } - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); mutex_unlock(&inode->i_mutex); set_enable_flags(dqopt, type); add_dquot_ref(sb, type); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; @@ -1462,7 +1489,7 @@ out_file_init: dqopt->files[type] = NULL; iput(inode); out_lock: - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); if (oldflags != -1) { down_write(&dqopt->dqptr_sem); /* Set the flags back (in the case of accidental quotaon() @@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_get_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_set_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) ii->dqi_flags = mi->dqi_flags & DQF_MASK; ii->dqi_valid = IIF_ALL; spin_unlock(&dq_data_lock); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1656,9 +1683,9 @@ int 
vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mark_info_dirty(sb, type); /* Force write to disk */ sb->dq_op->write_info(sb, type); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4284cd31eba6..1c2b16fda13a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -34,6 +34,7 @@ #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> @@ -46,7 +47,7 @@ * LOCKING: * There are three level of locking required by epoll : * - * 1) epsem (semaphore) + * 1) epmutex (mutex) * 2) ep->sem (rw_semaphore) * 3) ep->lock (rw_lock) * @@ -67,9 +68,9 @@ * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). * It is possible to drop the "ep->sem" and to use the global - * semaphore "epsem" (together with "ep->lock") to have it working, + * semaphore "epmutex" (together with "ep->lock") to have it working, * but having "ep->sem" will make the interface more scalable. - * Events that require holding "epsem" are very rare, while for + * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->sem" will guarantee * a greater scalability. */ @@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type, /* * This semaphore is used to serialize ep_free() and eventpoll_release_file(). */ -static struct semaphore epsem; +static struct mutex epmutex; /* Safe wake up implementation */ static struct poll_safewake psw; @@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) } -/* Used to initialize the epoll bits inside the "struct file" */ -void eventpoll_init_file(struct file *file) -{ - - INIT_LIST_HEAD(&file->f_ep_links); - spin_lock_init(&file->f_ep_lock); -} - - /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are @@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file) * cleanup path, and this means that noone is using this file anymore. * The only hit might come from ep_free() but by holding the semaphore * will correctly serialize the operation. We do need to acquire - * "ep->sem" after "epsem" because ep_remove() requires it when called + * "ep->sem" after "epmutex" because ep_remove() requires it when called * from anywhere but ep_free(). */ - down(&epsem); + mutex_lock(&epmutex); while (!list_empty(lsthead)) { epi = list_entry(lsthead->next, struct epitem, fllink); @@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file) up_write(&ep->sem); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep) * We do not need to hold "ep->sem" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but - * holding "epsem" is sufficent here. 
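Where a lock cannot be defined statically at its point of use — eventpoll initializes epmutex at module init — the conversion swaps init_MUTEX() for mutex_init(). A sketch (the init function name here is hypothetical):

	static struct mutex epmutex;	/* was: static struct semaphore epsem */

	static int __init example_subsys_init(void)
	{
		mutex_init(&epmutex);	/* was: init_MUTEX(&epsem) */
		return 0;
	}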
+ * holding "epmutex" is sufficent here. */ - down(&epsem); + mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. @@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep) ep_remove(ep, epi); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void) { int error; - init_MUTEX(&epsem); + mutex_init(&epmutex); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_poll_safewake_init(&psw); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ad1432a2a62e..4ca824985321 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -36,22 +36,6 @@ #include "acl.h" #include "xip.h" -/* - * Couple of helper functions - make the code slightly cleaner. - */ - -static inline void ext2_inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void ext2_dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); @@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - ext2_dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -201,7 +185,7 @@ out: return err; out_fail: - ext2_dec_count(inode); + inode_dec_link_count(inode); iput (inode); goto out; } @@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - ext2_inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return ext2_add_nondir(dentry, inode); @@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) if (dir->i_nlink >= EXT2_LINK_MAX) goto out; - ext2_inc_count(dir); + inode_inc_link_count(dir); inode = ext2_new_inode (dir, S_IFDIR | mode); err = PTR_ERR(inode); @@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) else inode->i_mapping->a_ops = &ext2_aops; - ext2_inc_count(inode); + inode_inc_link_count(inode); err = ext2_make_empty(inode, dir); if (err) @@ -258,11 +242,11 @@ out: return err; out_fail: - ext2_dec_count(inode); - ext2_dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - ext2_dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - ext2_dec_count(inode); + inode_dec_link_count(inode); err = 0; out: return err; @@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) err = ext2_unlink(dir, dentry); if (!err) { inode->i_size = 0; - ext2_dec_count(inode); - ext2_dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } return err; @@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - ext2_dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); err = 
ext2_add_link(new_dentry, old_inode); if (err) { - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - ext2_inc_count(new_dir); + inode_inc_link_count(new_dir); } /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * ext2_dec_count() will mark the inode dirty. + * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; ext2_delete_entry (old_de, old_page); - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { ext2_set_link(old_inode, dir_de, dir_page, new_dir); - ext2_dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 832867aef3dc..773459164bb2 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - unsigned long offset, blk; - int i, num, stored; - struct buffer_head * bh, * tmp, * bha[16]; - struct ext3_dir_entry_2 * de; - struct super_block * sb; + unsigned long offset; + int i, stored; + struct ext3_dir_entry_2 *de; + struct super_block *sb; int err; struct inode *inode = filp->f_dentry->d_inode; int ret = 0; @@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp, } #endif stored = 0; - bh = NULL; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { - blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); - bh = ext3_bread(NULL, inode, blk, 0, &err); + unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); + struct buffer_head map_bh; + struct buffer_head *bh = NULL; + + map_bh.b_state = 0; + err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); + if (!err) { + page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, + &filp->f_ra, + filp, + map_bh.b_blocknr >> + (PAGE_CACHE_SHIFT - inode->i_blkbits), + 1); + bh = ext3_bread(NULL, inode, blk, 0, &err); + } + + /* + * We ignore I/O errors on directories so users have a chance + * of recovering data when there's a bad sector + */ if (!bh) { ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", @@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp, continue; } - /* - * Do the readahead - */ - if (!offset) { - for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0; - i > 0; i--) { - tmp = ext3_getblk (NULL, inode, ++blk, 0, &err); - if (tmp && !buffer_uptodate(tmp) && - !buffer_locked(tmp)) - bha[num++] = tmp; - else - brelse (tmp); - } - if (num) { - ll_rw_block (READA, num, bha); - for (i = 0; i < num; i++) - brelse (bha[i]); - } - } - revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid diff --git a/fs/ext3/file.c b/fs/ext3/file.c index 98e78345ead9..59098ea56711 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp) if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { - down(&EXT3_I(inode)->truncate_sem); + mutex_lock(&EXT3_I(inode)->truncate_mutex); ext3_discard_reservation(inode); - up(&EXT3_I(inode)->truncate_sem); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 0384e539b88f..2c361377e0a5 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -671,7 +671,7 @@ err_out: * The BKL may not be held on entry here. 
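The ext2 hunks above drop the filesystem-private ext2_inc_count()/ext2_dec_count() wrappers in favor of the generic VFS helpers, which at this point are inline equivalents in <linux/fs.h>; effectively:

	static inline void inode_inc_link_count(struct inode *inode)
	{
		inode->i_nlink++;
		mark_inode_dirty(inode);
	}

	static inline void inode_dec_link_count(struct inode *inode)
	{
		inode->i_nlink--;
		mark_inode_dirty(inode);
	}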
Be sure to take it early. */ -static int +int ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int extend_disksize) { @@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, if (!create || err == -EIO) goto cleanup; - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); /* * If the indirect block is missing while we are reading @@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, } partial = ext3_get_branch(inode, depth, offsets, chain, &err); if (!partial) { - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; clear_buffer_new(bh_result); @@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, err = ext3_splice_branch(handle, inode, iblock, chain, partial, left); /* - * i_disksize growing is protected by truncate_sem. Don't forget to + * i_disksize growing is protected by truncate_mutex. Don't forget to * protect it if you're about to implement concurrent * ext3_get_block() -bzzz */ if (!err && extend_disksize && inode->i_size > ei->i_disksize) ei->i_disksize = inode->i_size; - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; @@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... * * Same applies to ext3_get_block(). We will deadlock on various things like - * lock_journal and i_truncate_sem. + * lock_journal and i_truncate_mutex. * * Setting PF_MEMALLOC here doesn't work - too many internal memory * allocations fail. @@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode) * From here we block out all ext3_get_block() callers who want to * modify the block allocation tree. 
*/ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (n == 1) { /* direct blocks */ ext3_free_data(handle, inode, NULL, i_data+offsets[0], @@ -2228,7 +2228,7 @@ do_indirects: ext3_discard_reservation(inode); - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 556cd5510078..aaf1da17b6d4 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -182,7 +182,7 @@ flags_err: * need to allocate reservation structure for this inode * before set the window size */ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); @@ -190,7 +190,7 @@ flags_err: struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); return 0; } case EXT3_IOC_GROUP_EXTEND: { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 56bf76586019..efe5b20d7a5a 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif - init_MUTEX(&ei->truncate_sem); + mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } } @@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_sem) - * down(dqio_sem) journal_start() + * DQUOT_INIT() down(dqio_mutex) + * down(dqio_mutex) journal_start() * */ diff --git a/fs/fat/dir.c b/fs/fat/dir.c index db0de5c621c7..4095bc149eb1 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -114,7 +114,7 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos, } /* - * Convert Unicode 16 to UTF8, translated Unicode, or ASCII. + * Convert Unicode 16 to UTF-8, translated Unicode, or ASCII. * If uni_xlate is enabled and we can't get a 1:1 conversion, use a * colon as an escape character since it is normally invalid on the vfat * filesystem. 
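In ext3, truncate_mutex serializes ext3_truncate() against block allocation in ext3_get_block_handle(); the per-inode field becomes a mutex initialized in the inode slab constructor (init_once() above). The usage shape, as in ext3_release_file():

	mutex_lock(&EXT3_I(inode)->truncate_mutex);	/* was: down(&...->truncate_sem) */
	ext3_discard_reservation(inode);
	mutex_unlock(&EXT3_I(inode)->truncate_mutex);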
The following four characters are the hexadecimal digits diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index a1a9e0451217..ab171ea8e869 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = { static inline void lock_fat(struct msdos_sb_info *sbi) { - down(&sbi->fat_lock); + mutex_lock(&sbi->fat_lock); } static inline void unlock_fat(struct msdos_sb_info *sbi) { - up(&sbi->fat_lock); + mutex_unlock(&sbi->fat_lock); } void fat_ent_access_init(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); - init_MUTEX(&sbi->fat_lock); + mutex_init(&sbi->fat_lock); switch (sbi->fat_bits) { case 32: diff --git a/fs/fat/inode.c b/fs/fat/inode.c index e7f4aa7fc686..e78d7b4842cc 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1101,7 +1101,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, return -EINVAL; } } - /* UTF8 doesn't provide FAT semantics */ + /* UTF-8 doesn't provide FAT semantics */ if (!strcmp(opts->iocharset, "utf8")) { printk(KERN_ERR "FAT: utf8 is not a recommended IO charset" " for FAT filesystems, filesystem will be case sensitive!\n"); diff --git a/fs/fcntl.c b/fs/fcntl.c index dc4a7007f4e7..03c789560fb8 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -73,8 +73,8 @@ repeat: * orig_start..fdt->next_fd */ start = orig_start; - if (start < fdt->next_fd) - start = fdt->next_fd; + if (start < files->next_fd) + start = files->next_fd; newfd = start; if (start < fdt->max_fdset) { @@ -102,9 +102,8 @@ repeat: * we reacquire the fdtable pointer and use it while holding * the lock, no one can free it during that time. */ - fdt = files_fdtable(files); - if (start <= fdt->next_fd) - fdt->next_fd = newfd + 1; + if (start <= files->next_fd) + files->next_fd = newfd + 1; error = newfd; diff --git a/fs/file.c b/fs/file.c index cea7cbea11d0..bbc743314730 100644 --- a/fs/file.c +++ b/fs/file.c @@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) kmem_cache_free(files_cachep, fdt->free_files); return; } - if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { + if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && + fdt->max_fds <= NR_OPEN_DEFAULT) { /* * The fdtable was embedded */ @@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu) void free_fdtable(struct fdtable *fdt) { - if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || - fdt->max_fds > NR_OPEN_DEFAULT) + if (fdt->free_files || + fdt->max_fdset > EMBEDDED_FD_SET_SIZE || + fdt->max_fds > NR_OPEN_DEFAULT) call_rcu(&fdt->rcu, free_fdtable_rcu); } @@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) (nfdt->max_fds - fdt->max_fds) * sizeof(struct file *)); } - nfdt->next_fd = fdt->next_fd; } /* @@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num) void free_fdset(fd_set *array, int num) { - int size = num / 8; - - if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */ + if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ return; - else if (size <= PAGE_SIZE) + else if (num <= 8 * PAGE_SIZE) kfree(array); else vfree(array); @@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr) fd_set *new_openset = NULL, *new_execset = NULL; struct file **new_fds; - fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); + fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); if (!fdt) goto out; - memset(fdt, 0, sizeof(*fdt)); - nfds = __FD_SETSIZE; + nfds = 8 * L1_CACHE_BYTES; /* Expand to the max in easy steps */ - do { - if (nfds < (PAGE_SIZE * 8)) - nfds = PAGE_SIZE * 8; - 
else { - nfds = nfds * 2; - if (nfds > NR_OPEN) - nfds = NR_OPEN; - } - } while (nfds <= nr); + while (nfds <= nr) { + nfds = nfds * 2; + if (nfds > NR_OPEN) + nfds = NR_OPEN; + } new_openset = alloc_fdset(nfds); new_execset = alloc_fdset(nfds); diff --git a/fs/file_table.c b/fs/file_table.c index 44fabeaa9415..bcea1998b4de 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, */ struct file *get_empty_filp(void) { + struct task_struct *tsk; static int old_max; struct file * f; @@ -112,13 +113,14 @@ struct file *get_empty_filp(void) if (security_file_alloc(f)) goto fail_sec; - eventpoll_init_file(f); + tsk = current; + INIT_LIST_HEAD(&f->f_u.fu_list); atomic_set(&f->f_count, 1); - f->f_uid = current->fsuid; - f->f_gid = current->fsgid; rwlock_init(&f->f_owner.lock); + f->f_uid = tsk->fsuid; + f->f_gid = tsk->fsgid; + eventpoll_init_file(f); /* f->f_version: 0 */ - INIT_LIST_HEAD(&f->f_u.fu_list); return f; over: diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 6628c3b352cb..4c6473ab3b34 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -9,6 +9,7 @@ //#define DBG //#define DEBUG_LOCKS +#include <linux/mutex.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/hpfs_fs.h> @@ -57,8 +58,8 @@ struct hpfs_inode_info { unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; - struct semaphore i_sem; - struct semaphore i_parent; + struct mutex i_mutex; + struct mutex i_parent_mutex; loff_t **i_rddir_off; struct inode vfs_inode; }; diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index e3d17e9ea6c1..56f2c338c4d9 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } - down(&hpfs_inode->i_parent); + mutex_lock(&hpfs_inode->i_parent_mutex); if (!i->i_nlink) { - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } - down(&hpfs_inode->i_sem); + mutex_lock(&hpfs_inode->i_mutex); hpfs_write_inode_nolock(i); - up(&hpfs_inode->i_sem); + mutex_unlock(&hpfs_inode->i_mutex); iput(parent); } else { mark_inode_dirty(i); } - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); } void hpfs_write_inode_nolock(struct inode *i) diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 8ff8fc433fc1..a03abb12c610 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail3; @@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail3: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - down(&hpfs_i(dir)->i_sem); + 
mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); brelse(bh); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); hpfs_adjust_length((char *)name, &len); again: - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -410,8 +410,8 @@ again: if (rep++) break; - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); d_drop(dentry); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) > 1 || @@ -442,8 +442,8 @@ again: out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) hpfs_adjust_length((char *)name, &len); lock_kernel(); - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, lock_kernel(); /* 
order doesn't matter, due to VFS exclusion */ - down(&hpfs_i(i)->i_parent); + mutex_lock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - down(&hpfs_i(new_inode)->i_parent); - down(&hpfs_i(old_dir)->i_sem); + mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); + mutex_lock(&hpfs_i(old_dir)->i_mutex); if (new_dir != old_dir) - down(&hpfs_i(new_dir)->i_sem); + mutex_lock(&hpfs_i(new_dir)->i_mutex); /* Erm? Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_decide_conv(i, (char *)new_name, new_len); end1: if (old_dir != new_dir) - up(&hpfs_i(new_dir)->i_sem); - up(&hpfs_i(old_dir)->i_sem); - up(&hpfs_i(i)->i_parent); + mutex_unlock(&hpfs_i(new_dir)->i_mutex); + mutex_unlock(&hpfs_i(old_dir)->i_mutex); + mutex_unlock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - up(&hpfs_i(new_inode)->i_parent); + mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); unlock_kernel(); return err; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 63e88d7e2c3b..9488a794076e 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->i_sem); - init_MUTEX(&ei->i_parent); + mutex_init(&ei->i_mutex); + mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); } } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index b35195289945..25fa8bba8cb5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -56,48 +56,10 @@ static void huge_pagevec_release(struct pagevec *pvec) pagevec_reinit(pvec); } -/* - * huge_pages_needed tries to determine the number of new huge pages that - * will be required to fully populate this VMA. This will be equal to - * the size of the VMA in huge pages minus the number of huge pages - * (covered by this VMA) that are found in the page cache. 
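Before its removal, the helper's estimate was simple arithmetic: pages spanned by the VMA minus pages already in the cache, scaled back to bytes. A sketch of that calculation (demo names; HPAGE_SHIFT is assumed to come from the architecture's asm/page.h):

#include <asm/page.h>

/* Bytes of new huge pages a mapping would need: the VMA span in
 * huge pages, minus pages already present, converted back to bytes. */
static unsigned long demo_pages_needed(unsigned long start, unsigned long end,
				       unsigned long already_cached)
{
	unsigned long spanned = (end - start) >> HPAGE_SHIFT;

	return (spanned - already_cached) << HPAGE_SHIFT;
}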
- * - * Result is in bytes to be compatible with is_hugepage_mem_enough() - */ -static unsigned long -huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma) -{ - int i; - struct pagevec pvec; - unsigned long start = vma->vm_start; - unsigned long end = vma->vm_end; - unsigned long hugepages = (end - start) >> HPAGE_SHIFT; - pgoff_t next = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT); - pgoff_t endpg = next + hugepages; - - pagevec_init(&pvec, 0); - while (next < endpg) { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) - break; - for (i = 0; i < pagevec_count(&pvec); i++) { - struct page *page = pvec.pages[i]; - if (page->index > next) - next = page->index; - if (page->index >= endpg) - break; - next++; - hugepages--; - } - huge_pagevec_release(&pvec); - } - return hugepages << HPAGE_SHIFT; -} - static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_dentry->d_inode; - struct address_space *mapping = inode->i_mapping; - unsigned long bytes; + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); loff_t len, vma_len; int ret; @@ -113,10 +75,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_end - vma->vm_start < HPAGE_SIZE) return -EINVAL; - bytes = huge_pages_needed(mapping, vma); - if (!is_hugepage_mem_enough(bytes)) - return -ENOMEM; - vma_len = (loff_t)(vma->vm_end - vma->vm_start); mutex_lock(&inode->i_mutex); @@ -129,6 +87,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size) goto out; + if (vma->vm_flags & VM_MAYSHARE) + if (hugetlb_extend_reservation(info, len >> HPAGE_SHIFT) != 0) + goto out; + ret = 0; hugetlb_prefault_arch_hook(vma->vm_mm); if (inode->i_size < len) @@ -227,13 +189,18 @@ static void truncate_huge_page(struct page *page) put_page(page); } -static void truncate_hugepages(struct address_space *mapping, loff_t lstart) +static void truncate_hugepages(struct inode *inode, loff_t lstart) { + struct address_space *mapping = &inode->i_data; const pgoff_t start = lstart >> HPAGE_SHIFT; struct pagevec pvec; pgoff_t next; int i; + hugetlb_truncate_reservation(HUGETLBFS_I(inode), + lstart >> HPAGE_SHIFT); + if (!mapping->nrpages) + return; pagevec_init(&pvec, 0); next = start; while (1) { @@ -262,8 +229,7 @@ static void truncate_hugepages(struct address_space *mapping, loff_t lstart) static void hugetlbfs_delete_inode(struct inode *inode) { - if (inode->i_data.nrpages) - truncate_hugepages(&inode->i_data, 0); + truncate_hugepages(inode, 0); clear_inode(inode); } @@ -296,8 +262,7 @@ static void hugetlbfs_forget_inode(struct inode *inode) inode->i_state |= I_FREEING; inodes_stat.nr_inodes--; spin_unlock(&inode_lock); - if (inode->i_data.nrpages) - truncate_hugepages(&inode->i_data, 0); + truncate_hugepages(inode, 0); clear_inode(inode); destroy_inode(inode); } @@ -356,7 +321,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) if (!prio_tree_empty(&mapping->i_mmap)) hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); spin_unlock(&mapping->i_mmap_lock); - truncate_hugepages(mapping, offset); + truncate_hugepages(inode, offset); return 0; } @@ -573,6 +538,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) hugetlbfs_inc_free_inodes(sbinfo); return NULL; } + p->prereserved_hpages = 0; return &p->vfs_inode; } @@ -771,21 +737,6 @@ static struct file_system_type hugetlbfs_fs_type = { static struct vfsmount *hugetlbfs_vfsmount; -/* - * 
Return the next identifier for a shm file - */ -static unsigned long hugetlbfs_counter(void) -{ - static DEFINE_SPINLOCK(lock); - static unsigned long counter; - unsigned long ret; - - spin_lock(&lock); - ret = ++counter; - spin_unlock(&lock); - return ret; -} - static int can_do_hugetlb_shm(void) { return likely(capable(CAP_IPC_LOCK) || @@ -801,18 +752,16 @@ struct file *hugetlb_zero_setup(size_t size) struct dentry *dentry, *root; struct qstr quick_string; char buf[16]; + static atomic_t counter; if (!can_do_hugetlb_shm()) return ERR_PTR(-EPERM); - if (!is_hugepage_mem_enough(size)) - return ERR_PTR(-ENOMEM); - if (!user_shm_lock(size, current->user)) return ERR_PTR(-ENOMEM); root = hugetlbfs_vfsmount->mnt_root; - snprintf(buf, 16, "%lu", hugetlbfs_counter()); + snprintf(buf, 16, "%u", atomic_inc_return(&counter)); quick_string.name = buf; quick_string.len = strlen(quick_string.name); quick_string.hash = 0; @@ -831,6 +780,11 @@ struct file *hugetlb_zero_setup(size_t size) if (!inode) goto out_file; + error = -ENOMEM; + if (hugetlb_extend_reservation(HUGETLBFS_I(inode), + size >> HPAGE_SHIFT) != 0) + goto out_inode; + d_instantiate(dentry, inode); inode->i_size = size; inode->i_nlink = 0; @@ -841,6 +795,8 @@ struct file *hugetlb_zero_setup(size_t size) file->f_mode = FMODE_WRITE | FMODE_READ; return file; +out_inode: + iput(inode); out_file: put_filp(file); out_dentry: diff --git a/fs/inode.c b/fs/inode.c index d0be6159eb7f..25967b67903d 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable; DEFINE_SPINLOCK(inode_lock); /* - * iprune_sem provides exclusion between the kswapd or try_to_free_pages + * iprune_mutex provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. */ -DECLARE_MUTEX(iprune_sem); +DEFINE_MUTEX(iprune_mutex); /* * Statistics gathering.. @@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) i_size_ordered_init(inode); #ifdef CONFIG_INOTIFY INIT_LIST_HEAD(&inode->inotify_watches); - sema_init(&inode->inotify_sem, 1); + mutex_init(&inode->inotify_mutex); #endif } @@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not - * change during umount anymore, and because iprune_sem keeps + * change during umount anymore, and because iprune_mutex keeps * shrink_icache_memory() away. */ cond_resched_lock(&inode_lock); @@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb) int busy; LIST_HEAD(throw_away); - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); return busy; } @@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev) if (sb) { /* * no need to lock the super, get_super holds the - * read semaphore so the filesystem cannot go away + * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * hold). 
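The replacement above is the standard lock-free counter idiom: atomic_inc_return() bumps and reads the counter in one atomic operation, so the dedicated spinlock and the helper wrapping it can go. Both shapes, as a sketch:

#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Old shape: a counter serialised by its own spinlock. */
static unsigned long demo_locked_next(void)
{
	static DEFINE_SPINLOCK(lock);
	static unsigned long counter;
	unsigned long ret;

	spin_lock(&lock);
	ret = ++counter;
	spin_unlock(&lock);
	return ret;
}

/* New shape: one atomic op, no lock, same unique-value guarantee. */
static unsigned int demo_atomic_next(void)
{
	static atomic_t counter = ATOMIC_INIT(0);

	return atomic_inc_return(&counter);
}

Note the matching format change in the snprintf() above: atomic_inc_return() returns int, hence "%u" instead of "%lu".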
*/ @@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); if (current_is_kswapd()) mod_page_state(kswapd_inodesteal, reap); diff --git a/fs/inotify.c b/fs/inotify.c index 3041503bde02..0ee39ef591c6 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -54,10 +54,10 @@ int inotify_max_queued_events; * Lock ordering: * * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_sem (synchronize shrink_icache_memory()) + * iprune_mutex (synchronize shrink_icache_memory()) * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) - * inotify_dev->sem (protects inotify_device and watches->d_list) + * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) + * inotify_dev->mutex (protects inotify_device and watches->d_list) */ /* @@ -79,12 +79,12 @@ int inotify_max_queued_events; /* * struct inotify_device - represents an inotify instance * - * This structure is protected by the semaphore 'sem'. + * This structure is protected by the mutex 'mutex'. */ struct inotify_device { wait_queue_head_t wq; /* wait queue for i/o */ struct idr idr; /* idr mapping wd -> watch */ - struct semaphore sem; /* protects this bad boy */ + struct mutex mutex; /* protects this bad boy */ struct list_head events; /* list of queued events */ struct list_head watches; /* list of watches */ atomic_t count; /* reference count */ @@ -101,7 +101,7 @@ struct inotify_device { * device. In read(), this list is walked and all events that can fit in the * buffer are returned. * - * Protected by dev->sem of the device in which we are queued. + * Protected by dev->mutex of the device in which we are queued. */ struct inotify_kernel_event { struct inotify_event event; /* the user-space event */ @@ -112,8 +112,8 @@ struct inotify_kernel_event { /* * struct inotify_watch - represents a watch request on a specific inode * - * d_list is protected by dev->sem of the associated watch->dev. - * i_list and mask are protected by inode->inotify_sem of the associated inode. + * d_list is protected by dev->mutex of the associated watch->dev. + * i_list and mask are protected by inode->inotify_mutex of the associated inode. * dev, inode, and wd are never written to once the watch is created. */ struct inotify_watch { @@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, /* * inotify_dev_get_event - return the next event in the given dev's queue * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static inline struct inotify_kernel_event * inotify_dev_get_event(struct inotify_device *dev) @@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) /* * inotify_dev_queue_event - add a new event to the given device * - * Caller must hold dev->sem. Can sleep (calls kernel_event()). + * Caller must hold dev->mutex. Can sleep (calls kernel_event()). 
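Comments like "Caller must hold dev->mutex" document a contract the compiler cannot check; one cheap, optional way to make them self-enforcing is an assertion at the top of the helper. A sketch (not something this patch adds, and mutex_is_locked() only tells you that someone holds the lock, not that the caller does):

#include <linux/kernel.h>
#include <linux/mutex.h>

struct demo_dev {
	struct mutex mutex;	/* protects the event and watch lists */
};

/* Caller must hold dev->mutex. */
static void demo_queue_event(struct demo_dev *dev)
{
	WARN_ON(!mutex_is_locked(&dev->mutex));
	/* ... append the event to dev's list ... */
}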
*/ static void inotify_dev_queue_event(struct inotify_device *dev, struct inotify_watch *watch, u32 mask, @@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, /* * remove_kevent - cleans up and ultimately frees the given kevent * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void remove_kevent(struct inotify_device *dev, struct inotify_kernel_event *kevent) @@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, /* * inotify_dev_event_dequeue - destroy an event on the given device * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void inotify_dev_event_dequeue(struct inotify_device *dev) { @@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) /* * inotify_dev_get_wd - returns the next WD for use by the given dev * - * Callers must hold dev->sem. This function can sleep. + * Callers must hold dev->mutex. This function can sleep. */ static int inotify_dev_get_wd(struct inotify_device *dev, struct inotify_watch *watch) @@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, /* * create_watch - creates a watch on the given device. * - * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. + * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. */ static struct inotify_watch *create_watch(struct inotify_device *dev, @@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, /* * inotify_find_dev - find the watch associated with the given inode and dev * - * Callers must hold inode->inotify_sem. + * Callers must hold inode->inotify_mutex. */ static struct inotify_watch *inode_find_dev(struct inode *inode, struct inotify_device *dev) @@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, * the IN_IGNORED event to the given device signifying that the inode is no * longer watched. * - * Callers must hold both inode->inotify_sem and dev->sem. We drop a + * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a * reference to the inode before returning. * * The inode is not iput() so as to remain atomic. If the inode needs to be @@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, if (!inotify_inode_watched(inode)) return; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { u32 watch_mask = watch->mask; if (watch_mask & mask) { struct inotify_device *dev = watch->dev; get_inotify_watch(watch); - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, mask, cookie, name); if (watch_mask & IN_ONESHOT) remove_watch_no_event(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); put_inotify_watch(watch); } } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_queue_event); @@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie); * @list: list of inodes being unmounted (sb->s_inodes) * * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. + * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. * We temporarily drop inode_lock, however, and CAN block. 
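The queueing path above also shows why the documented ordering works without use-after-free: each watch is pinned with a reference before the inner per-device mutex is taken. The nesting, reduced to a sketch with demo types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

struct demo_dev {
	struct mutex mutex;
};

struct demo_watch {
	struct list_head i_list;
	struct demo_dev *dev;
	atomic_t count;
};

struct demo_inode {
	struct mutex inotify_mutex;
	struct list_head watches;
};

/* Outer inode lock first; pin each watch before the inner device
 * lock so it cannot be freed while dev->mutex is held. */
static void demo_queue(struct demo_inode *inode)
{
	struct demo_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->watches, i_list) {
		atomic_inc(&watch->count);	/* pin */
		mutex_lock(&watch->dev->mutex);
		/* ... queue the event on watch->dev ... */
		mutex_unlock(&watch->dev->mutex);
		atomic_dec(&watch->count);	/* unpin */
	}
	mutex_unlock(&inode->inotify_mutex);
}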
*/ void inotify_unmount_inodes(struct list_head *list) @@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list) * We can safely drop inode_lock here because we hold * references on both inode and next_i. Also no new inodes * will be added since the umount has begun. Finally, - * iprune_sem keeps shrink_icache_memory() away. + * iprune_mutex keeps shrink_icache_memory() away. */ spin_unlock(&inode_lock); @@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) iput(need_iput_tmp); /* for each watch, send IN_UNMOUNT and then remove it */ - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); iput(inode); spin_lock(&inode_lock); @@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) { struct inotify_watch *watch, *next; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_is_dead); @@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) int ret = 0; poll_wait(file, &dev->wq, wait); - down(&dev->sem); + mutex_lock(&dev->mutex); if (!list_empty(&dev->events)) ret = POLLIN | POLLRDNORM; - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - down(&dev->sem); + mutex_lock(&dev->mutex); events = !list_empty(&dev->events); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (events) { ret = 0; break; @@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, if (ret) return ret; - down(&dev->sem); + mutex_lock(&dev->mutex); while (1) { struct inotify_kernel_event *kevent; @@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, remove_kevent(dev, kevent); } - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) * Destroy all of the watches on this device. Unfortunately, not very * pretty. We cannot do a simple iteration over the list, because we * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_sem before dev->sem. The following works. + * hold inode->inotify_mutex before dev->mutex. The following works. 
*/ while (1) { struct inotify_watch *watch; struct list_head *watches; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watches = &dev->watches; if (list_empty(watches)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); break; } watch = list_entry(watches->next, struct inotify_watch, d_list); get_inotify_watch(watch); - up(&dev->sem); + mutex_unlock(&dev->mutex); inode = watch->inode; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); remove_watch_no_event(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); } /* destroy all of the events on this device */ - down(&dev->sem); + mutex_lock(&dev->mutex); while (!list_empty(&dev->events)) inotify_dev_event_dequeue(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); /* free this device: the put matching the get in inotify_init() */ put_inotify_dev(dev); @@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) struct inotify_watch *watch; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watch = idr_find(&dev->idr, wd); if (unlikely(!watch)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); return -EINVAL; } get_inotify_watch(watch); inode = watch->inode; - up(&dev->sem); + mutex_unlock(&dev->mutex); - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); /* make sure that we did not race */ watch = idr_find(&dev->idr, wd); if (likely(watch)) remove_watch(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); return 0; @@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) INIT_LIST_HEAD(&dev->events); INIT_LIST_HEAD(&dev->watches); init_waitqueue_head(&dev->wq); - sema_init(&dev->sem, 1); + mutex_init(&dev->mutex); dev->event_count = 0; dev->queue_size = 0; dev->max_events = inotify_max_queued_events; @@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) inode = nd.dentry->d_inode; dev = filp->private_data; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); if (mask & IN_MASK_ADD) mask_add = 1; @@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) list_add(&watch->i_list, &inode->inotify_watches); ret = watch->wd; out: - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); path_release(&nd); fput_and_out: fput_light(filp, fput_needed); diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c index 2931de7f1a6a..81a90e170ac3 100644 --- a/fs/isofs/joliet.c +++ b/fs/isofs/joliet.c @@ -11,7 +11,7 @@ #include "isofs.h" /* - * Convert Unicode 16 to UTF8 or ASCII. + * Convert Unicode 16 to UTF-8 or ASCII. 
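For reference, encoding a 16-bit code point as UTF-8 takes one to three bytes depending on its magnitude; the kernel's NLS code has utf8_wctomb() for this, but the core transform looks like the following standalone sketch (surrogate pairs ignored for brevity):

/* Encode one 16-bit code point as UTF-8; returns the byte count. */
static int demo_utf8_encode(unsigned char *out, unsigned short c)
{
	if (c < 0x80) {
		out[0] = c;				/* 0xxxxxxx */
		return 1;
	}
	if (c < 0x800) {
		out[0] = 0xc0 | (c >> 6);		/* 110xxxxx */
		out[1] = 0x80 | (c & 0x3f);		/* 10xxxxxx */
		return 2;
	}
	out[0] = 0xe0 | (c >> 12);			/* 1110xxxx */
	out[1] = 0x80 | ((c >> 6) & 0x3f);
	out[2] = 0x80 | (c & 0x3f);
	return 3;
}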
*/ static int uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls) diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 543ed543d1e5..3f5102b069db 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); - down(&journal->j_checkpoint_sem); + mutex_lock(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we @@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal) log_do_checkpoint(journal); spin_lock(&journal->j_state_lock); } - up(&journal->j_checkpoint_sem); + mutex_unlock(&journal->j_checkpoint_mutex); } } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e4b516ac4989..95a628d8cac8 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -659,8 +659,8 @@ static journal_t * journal_init_common (void) init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); - init_MUTEX(&journal->j_barrier); - init_MUTEX(&journal->j_checkpoint_sem); + mutex_init(&journal->j_barrier); + mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_state_lock); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ca917973c2c0..5fc40888f4cf 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal) * to make sure that we serialise special journal-locked operations * too. */ - down(&journal->j_barrier); + mutex_lock(&journal->j_barrier); } /** @@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); - up(&journal->j_barrier); + mutex_unlock(&journal->j_barrier); spin_lock(&journal->j_state_lock); --journal->j_barrier_count; spin_unlock(&journal->j_state_lock); diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index fc3855a1aef3..890d7ff7456d 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c @@ -42,7 +42,7 @@ #include <linux/quotaops.h> #include <linux/highmem.h> #include <linux/vfs.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/uaccess.h> @@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) fmc = c->fmc; D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); f = jffs_find_file(c, inode->i_ino); @@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) printk("jffs_setattr(): Invalid inode number: %lu\n", inode->i_ino); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -EINVAL; goto out; }); @@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) if (!(new_node = jffs_alloc_node())) { D(printk("jffs_setattr(): Allocation failed!\n")); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -ENOMEM; goto out; } @@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) D(printk("jffs_notify_change(): The write failed!\n")); jffs_free_node(new_node); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); goto out; } @@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) mark_inode_dirty(inode); 
D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); out: unlock_kernel(); return res; @@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, goto jffs_rename_end; } D3(printk (KERN_NOTICE "rename(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create a node and initialize as much as needed. */ result = -ENOMEM; if (!(node = jffs_alloc_node())) { @@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, jffs_rename_end: D3(printk (KERN_NOTICE "rename(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_rename() */ @@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) int ddino; lock_kernel(); D3(printk (KERN_NOTICE "readdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); if (filp->f_pos == 0) { D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filldir(dirent, f->name, f->nsize, filp->f_pos , f->ino, DT_UNKNOWN) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) } while(f && f->deleted); } D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return filp->f_pos; } /* jffs_readdir() */ @@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) }); D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); r = -ENAMETOOLONG; if (len > JFFS_MAX_NAME_LEN) { @@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if ((len == 1) && (name[0] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->ino))) { D(printk("jffs_lookup(): . iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->pino))) { D(printk("jffs_lookup(): .. 
iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((f = jffs_find_child(d, name, len))) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, f->ino))) { D(printk("jffs_lookup(): iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else { D3(printk("jffs_lookup(): Couldn't find the file. " "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", @@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) d_add(dentry, inode); D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return NULL; jffs_lookup_end: D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); jffs_lookup_end_no_biglock: unlock_kernel(); @@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) ClearPageError(page); D3(printk (KERN_NOTICE "readpage(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); read_len = 0; result = 0; @@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) kunmap(page); D3(printk (KERN_NOTICE "readpage(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (result) { SetPageError(page); @@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) c = dir_f->c; D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask); @@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result = 0; jffs_mkdir_end: D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mkdir() */ @@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry) D3(printk("***jffs_rmdir()\n")); D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); lock_kernel(); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, S_IFDIR); D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); D3(printk("***jffs_unlink()\n")); D3(printk (KERN_NOTICE "unlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, 0); D3(printk (KERN_NOTICE "unlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) c = dir_f->c; D3(printk (KERN_NOTICE "mknod(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create and initialize a new node. 
*/ if (!(node = jffs_alloc_node())) { @@ -1152,7 +1152,7 @@ jffs_mknod_err: jffs_mknod_end: D3(printk (KERN_NOTICE "mknod(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mknod() */ @@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) return -ENOMEM; } D3(printk (KERN_NOTICE "symlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) d_instantiate(dentry, inode); jffs_symlink_end: D3(printk (KERN_NOTICE "symlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_symlink() */ @@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, return -ENOMEM; } D3(printk (KERN_NOTICE "create(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, d_instantiate(dentry, inode); jffs_create_end: D3(printk (KERN_NOTICE "create(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_create() */ @@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); D3(printk (KERN_NOTICE "file_write(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Urgh. POSIX says we can do short writes if we feel like it. * In practice, we can't. Nothing will cope. So we loop until @@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, } out: D3(printk (KERN_NOTICE "file_write(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); /* Fix things in the real inode. 
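All the D3(printk(...)) noise bracketing the biglock operations is jffs's compile-time debug-level scheme: Dn(x) expands to x only when the build's verbosity is at least n, and to nothing otherwise, so release builds pay no cost. The idiom, roughly (DEMO_DEBUG_LEVEL stands in for the jffs configuration knob):

#define DEMO_DEBUG_LEVEL 1

#if DEMO_DEBUG_LEVEL >= 3
#define D3(x) x		/* debug statement compiled in */
#else
#define D3(x)		/* compiled out entirely */
#endif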
*/ if (pos > inode->i_size) { @@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return -EIO; } D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); switch (cmd) { case JFFS_PRINT_HASH: @@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, ret = -ENOTTY; } D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return ret; } /* jffs_ioctl() */ @@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode) } c = (struct jffs_control *)inode->i_sb->s_fs_info; D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); if (!(f = jffs_find_file(c, inode->i_ino))) { D(printk("jffs_read_inode(): No such inode (%lu).\n", inode->i_ino)); D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return; } inode->u.generic_ip = (void *)f; @@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode) } D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); } diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index ce7b54b0b2b7..0ef207dfaf6f 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c @@ -62,7 +62,7 @@ #include <linux/fs.h> #include <linux/stat.h> #include <linux/pagemap.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <linux/smp_lock.h> #include <linux/time.h> @@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr) D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); D1(printk("***jffs_garbage_collect_thread(): round #%u, " "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); @@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr) gc_end: D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); } /* for (;;) */ } /* jffs_garbage_collect_thread() */ diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c index 6da13b309bd1..7d8ca1aeace2 100644 --- a/fs/jffs/jffs_fm.c +++ b/fs/jffs/jffs_fm.c @@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit) fmc->tail = NULL; fmc->head_extra = NULL; fmc->tail_extra = NULL; - init_MUTEX(&fmc->biglock); + mutex_init(&fmc->biglock); return fmc; } diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h index f64151e74122..c794d923df2a 100644 --- a/fs/jffs/jffs_fm.h +++ b/fs/jffs/jffs_fm.h @@ -20,10 +20,11 @@ #ifndef __LINUX_JFFS_FM_H__ #define __LINUX_JFFS_FM_H__ +#include <linux/config.h> #include <linux/types.h> #include <linux/jffs.h> #include <linux/mtd/mtd.h> -#include <linux/config.h> +#include <linux/mutex.h> /* The alignment between two nodes in the flash memory. 
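The #define that follows sets that alignment to four bytes; rounding a node size up to such a power-of-two boundary is the usual mask trick. The real header wraps this in its own padding macros, so demo_align below is illustrative only:

#define DEMO_ALIGN_SIZE 4	/* must be a power of two */

/* Round sz up to the next DEMO_ALIGN_SIZE boundary. */
static unsigned int demo_align(unsigned int sz)
{
	return (sz + DEMO_ALIGN_SIZE - 1) & ~(DEMO_ALIGN_SIZE - 1);
}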
*/ #define JFFS_ALIGN_SIZE 4 @@ -97,7 +98,7 @@ struct jffs_fmcontrol struct jffs_fm *tail; struct jffs_fm *head_extra; struct jffs_fm *tail_extra; - struct semaphore biglock; + struct mutex biglock; }; /* Notice the two members head_extra and tail_extra in the jffs_control diff --git a/fs/libfs.c b/fs/libfs.c index 71fd08fa4103..4fdeaceb892c 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -7,6 +7,8 @@ #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/vfs.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, @@ -530,7 +532,7 @@ struct simple_attr { char set_buf[24]; void *data; const char *fmt; /* format for read operation */ - struct semaphore sem; /* protects access to these buffers */ + struct mutex mutex; /* protects access to these buffers */ }; /* simple_attr_open is called by an actual attribute open file operation @@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file, attr->set = set; attr->data = inode->u.generic_ip; attr->fmt = fmt; - init_MUTEX(&attr->sem); + mutex_init(&attr->mutex); file->private_data = attr; @@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, if (!attr->get) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); if (*ppos) /* continued read */ size = strlen(attr->get_buf); else /* first read */ @@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, (unsigned long long)attr->get(attr->data)); ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } @@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, if (!attr->set) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) @@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index b25bca5bdb57..5b6a4540a05b 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -6,18 +6,6 @@ #include "minix.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = minix_add_link(dentry, inode); @@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -125,7 +113,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); } @@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) goto out; - inc_count(dir); + inode_inc_link_count(dir); inode = minix_new_inode(dir, &err); if (!inode) @@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry 
*dentry, int mode) inode->i_mode |= S_ISGID; minix_set_inode(inode, 0); - inc_count(inode); + inode_inc_link_count(inode); err = minix_make_empty(inode, dir); if (err) @@ -178,11 +166,11 @@ out: return err; out_fail: - dec_count(inode); - dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry) goto end_unlink; inode->i_ctime = dir->i_ctime; - dec_count(inode); + inode_dec_link_count(inode); end_unlink: return err; } @@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) if (minix_empty_dir(inode)) { err = minix_unlink(dir, dentry); if (!err) { - dec_count(dir); - dec_count(inode); + inode_dec_link_count(dir); + inode_dec_link_count(inode); } } return err; @@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inc_count(old_inode); + inode_inc_link_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inc_count(old_inode); + inode_inc_link_count(old_inode); err = minix_add_link(new_dentry, old_inode); if (err) { - dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - inc_count(new_dir); + inode_inc_link_count(new_dir); } minix_delete_entry(old_de, old_page); - dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); - dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/namei.c b/fs/namei.c index 8dc2b038d5d9..c72b940797fc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -104,7 +104,7 @@ */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) - * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives + * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ @@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) return NULL; } - down(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { @@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); - up(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); } } @@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname) * a) we can get into loop creation. Check is done in is_subdir(). * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on - * sb->s_vfs_rename_sem. We might be more accurate, but that's another + * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _three_ objects - parents and victim (if it exists). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. 
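lock_rename(), converted above, is where that ordering is made concrete: the per-superblock rename mutex comes first, then the two directory i_mutexes with the ancestor before the descendant, so every rename agrees on one global order. A simplified sketch (the real function also handles p1 == p2 and the non-ancestor case):

#include <linux/fs.h>
#include <linux/mutex.h>

/* Simplified from lock_rename(): per-sb rename mutex first, then
 * the two directory i_mutexes, ancestor before descendant. */
static void demo_lock_rename(struct super_block *sb,
			     struct inode *ancestor, struct inode *descendant)
{
	mutex_lock(&sb->s_vfs_rename_mutex);
	mutex_lock(&ancestor->i_mutex);
	mutex_lock(&descendant->i_mutex);
	/* ... do the rename, then unlock in reverse order ... */
}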
We rely on the fact that tree topology may change - * only under ->s_vfs_rename_sem _and_ that parent of the object we + * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, - * lock child" and rename is under ->s_vfs_rename_sem. + * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 973b444d6914..ebdad8f6398f 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum); error = -EACCES; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (!atomic_read(&NCP_FINFO(inode)->opened)) { struct ncp_entry_info finfo; int result; @@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right) } out_unlock: - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); out: return error; } diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index d277a58bd128..0b521d3d97ce 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->open_sem); + mutex_init(&ei->open_mutex); inode_init_once(&ei->vfs_inode); } } @@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) } /* server->lock = 0; */ - init_MUTEX(&server->sem); + mutex_init(&server->mutex); server->packet = NULL; /* server->buffer_size = 0; */ /* server->conn_status = 0; */ @@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->dentry_ttl = 0; /* no caching */ INIT_LIST_HEAD(&server->tx.requests); - init_MUTEX(&server->rcv.creq_sem); + mutex_init(&server->rcv.creq_mutex); server->tx.creq = NULL; server->rcv.creq = NULL; server->data_ready = sock->sk->sk_data_ready; diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index c755e1848a42..d9ebf6439f59 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode) int err; err = 0; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); @@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 6593a5ca88ba..8783eb7ec641 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncp_abort_request(server, req, err); - up(&server->rcv.creq_sem); + 
mutex_unlock(&server->rcv.creq_mutex); } static inline void __ncptcp_abort(struct ncp_server *server) @@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); if (!ncp_conn_valid(server)) { - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); printk(KERN_ERR "ncpfs: tcp: Server died\n"); return -EIO; } if (server->tx.creq || server->rcv.creq) { req->status = RQ_QUEUED; list_add_tail(&req->req, &server->tx.requests); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } __ncp_start_request(server, req); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } @@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s) info_server(server, 0, server->unexpected_packet.data, result); continue; } - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); req = server->rcv.creq; if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && server->connection == get_conn_number(&reply)))) { @@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s) server->rcv.creq = NULL; ncp_finish_request(req, result); __ncp_next_request(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); continue; } } - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } drop:; _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); @@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) void ncpdgram_timeout_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncpdgram_timeout_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static inline void ncp_init_req(struct ncp_request_reply* req) @@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_rcv_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } void ncp_tcp_tx_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_try_send(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static int do_ncp_rpc_call(struct ncp_server *server, int size, @@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server) void ncp_lock_server(struct ncp_server *server) { - down(&server->sem); + mutex_lock(&server->mutex); if (server->lock) printk(KERN_WARNING "ncp_lock_server: was locked!\n"); server->lock = 1; @@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server) return; } server->lock = 0; - up(&server->sem); + mutex_unlock(&server->mutex); } diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig index 0ab8f00bdbb2..976ecccd6f56 100644 --- a/fs/nls/Kconfig +++ b/fs/nls/Kconfig @@ -491,7 +491,7 @@ config NLS_KOI8_U (koi8-u) and Belarusian (koi8-ru) character sets. config NLS_UTF8 - tristate "NLS UTF8" + tristate "NLS UTF-8" depends on NLS help If you want to display filenames with native language characters diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog index 9d8ffa89e2c2..35cc4b1d60f7 100644 --- a/fs/ntfs/ChangeLog +++ b/fs/ntfs/ChangeLog @@ -16,8 +16,34 @@ ToDo/Notes: inode having been discarded already. 
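The ncpfs hunks above are another instance of the mechanical semaphore-to-mutex conversion this series applies throughout the tree: a struct semaphore that is only ever used for mutual exclusion becomes a struct mutex. A self-contained before/after miniature (the demo_* names are hypothetical, not from the patch):

	#include <linux/mutex.h>

	struct demo_server {
		struct mutex creq_mutex;	/* was: struct semaphore creq_sem; */
	};

	static void demo_setup(struct demo_server *s)
	{
		mutex_init(&s->creq_mutex);	/* was: init_MUTEX(&s->creq_sem); */
	}

	static void demo_touch_queue(struct demo_server *s)
	{
		mutex_lock(&s->creq_mutex);	/* was: down(&s->creq_sem); */
		/* ... manipulate the current-request queue ... */
		mutex_unlock(&s->creq_mutex);	/* was: up(&s->creq_sem); */
	}

Beyond the type change, mutexes enforce single-owner semantics and come with stronger debug checking than semaphores, which is the point of the conversion.
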
Whether this can actually ever happen is unclear however so it is worth waiting until someone hits the problem. - - Enable the code for setting the NT4 compatibility flag when we start - making NTFS 1.2 specific modifications. + +2.1.27 - Various bug fixes and cleanups. + + - Fix two compiler warnings on Alpha. Thanks to Andrew Morton for + reporting them. + - Fix an (innocent) off-by-one error in the runlist code. + - Fix a buggette in a "should be impossible" case handling where we + continued the attribute lookup loop instead of aborting it. + - Use buffer_migrate_page() for the ->migratepage function of all ntfs + address space operations. + - Fix comparison of $MFT and $MFTMirr to not bail out when there are + unused, invalid mft records which are the same in both $MFT and + $MFTMirr. + - Add support for sparse files which have a compression unit of 0. + - Remove all the make_bad_inode() calls. This should only be called + from read inode and new inode code paths. + - Limit name length in fs/ntfs/unistr.c::ntfs_nlstoucs() to the maximum + allowed by NTFS, i.e. 255 Unicode characters, not including the + terminating NULL (which is not stored on disk). + - Improve comments on file attribute flags in fs/ntfs/layout.h. + - Fix a bug in fs/ntfs/inode.c::ntfs_read_locked_index_inode() where we + forgot to update a temporary variable so loading index inodes which + have an index allocation attribute failed. + - Add a missing call to flush_dcache_mft_record_page() in + fs/ntfs/inode.c::ntfs_write_inode(). + - Handle the recently introduced -ENAMETOOLONG return value from + fs/ntfs/unistr.c::ntfs_nlstoucs() in fs/ntfs/namei.c::ntfs_lookup(). + - Semaphore to mutex conversion. (Ingo Molnar) 2.1.26 - Minor bug fixes and updates. diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile index d95fac7fdeb6..e27b4eacffbf 100644 --- a/fs/ntfs/Makefile +++ b/fs/ntfs/Makefile @@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \ index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \ unistr.o upcase.o -EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.26\" +EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.27\" ifeq ($(CONFIG_NTFS_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 7e361da770b3..580412d330cb 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -22,6 +22,7 @@ */ #include <linux/errno.h> +#include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/swap.h> @@ -1277,18 +1278,18 @@ unm_done: tni = locked_nis[nr_locked_nis]; /* Get the base inode. */ - down(&tni->extent_lock); + mutex_lock(&tni->extent_lock); if (tni->nr_extents >= 0) base_tni = tni; else { base_tni = tni->ext.base_ntfs_ino; BUG_ON(!base_tni); } - up(&tni->extent_lock); + mutex_unlock(&tni->extent_lock); ntfs_debug("Unlocking %s inode 0x%lx.", tni == base_tni ? "base" : "extent", tni->mft_no); - up(&tni->mrec_lock); + mutex_unlock(&tni->mrec_lock); atomic_dec(&tni->count); iput(VFS_I(base_tni)); } @@ -1529,7 +1530,6 @@ err_out: "error %i.", err); SetPageError(page); NVolSetErrors(ni->vol); - make_bad_inode(vi); } unlock_page(page); if (ctx) @@ -1551,6 +1551,9 @@ struct address_space_operations ntfs_aops = { #ifdef NTFS_RW .writepage = ntfs_writepage, /* Write dirty page to disk. */ #endif /* NTFS_RW */ + .migratepage = buffer_migrate_page, /* Move a page cache page from + one physical page to an + other. */ }; /** @@ -1567,6 +1570,9 @@ struct address_space_operations ntfs_mst_aops = { without touching the buffers belonging to the page.
*/ #endif /* NTFS_RW */ + .migratepage = buffer_migrate_page, /* Move a page cache page from + one physical page to an + other. */ }; #ifdef NTFS_RW diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 9480a0526cd3..1663f5c3c6aa 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -1,7 +1,7 @@ /** * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2005 Anton Altaparmakov + * Copyright (c) 2001-2006 Anton Altaparmakov * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or @@ -1048,7 +1048,7 @@ do_next_attr_loop: le32_to_cpu(ctx->mrec->bytes_allocated)) break; if (a->type == AT_END) - continue; + break; if (!a->length) break; if (al_entry->instance != a->instance) @@ -1695,7 +1695,9 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) a->data.non_resident.initialized_size = cpu_to_sle64(attr_size); if (NInoSparse(ni) || NInoCompressed(ni)) { - a->data.non_resident.compression_unit = 4; + a->data.non_resident.compression_unit = 0; + if (NInoCompressed(ni) || vol->major_ver < 3) + a->data.non_resident.compression_unit = 4; a->data.non_resident.compressed_size = a->data.non_resident.allocated_size; } else @@ -1714,13 +1716,20 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) ni->allocated_size = new_size; if (NInoSparse(ni) || NInoCompressed(ni)) { ni->itype.compressed.size = ni->allocated_size; - ni->itype.compressed.block_size = 1U << - (a->data.non_resident.compression_unit + - vol->cluster_size_bits); - ni->itype.compressed.block_size_bits = - ffs(ni->itype.compressed.block_size) - 1; - ni->itype.compressed.block_clusters = 1U << - a->data.non_resident.compression_unit; + if (a->data.non_resident.compression_unit) { + ni->itype.compressed.block_size = 1U << (a->data. + non_resident.compression_unit + + vol->cluster_size_bits); + ni->itype.compressed.block_size_bits = + ffs(ni->itype.compressed.block_size) - + 1; + ni->itype.compressed.block_clusters = 1U << + a->data.non_resident.compression_unit; + } else { + ni->itype.compressed.block_size = 0; + ni->itype.compressed.block_size_bits = 0; + ni->itype.compressed.block_clusters = 0; + } vi->i_blocks = ni->itype.compressed.size >> 9; } else vi->i_blocks = ni->allocated_size >> 9; @@ -2429,16 +2438,12 @@ undo_alloc: "chkdsk to recover.", IS_ERR(m) ? "restore attribute search context" : "truncate attribute runlist"); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } else if (mp_rebuilt) { if (ntfs_attr_record_resize(m, a, attr_len)) { ntfs_error(vol->sb, "Failed to restore attribute " "record in error code path. Run " "chkdsk to recover."); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } else /* if (success) */ { if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu( @@ -2451,8 +2456,6 @@ undo_alloc: "mapping pairs array in error " "code path. Run chkdsk to " "recover."); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } flush_dcache_mft_record_page(ctx->ntfs_ino); diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index 25d24106f893..68a607ff9fd3 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(ntfs_cb_lock); /** * allocate_compression_buffers - allocate the decompression buffers * - * Caller has to hold the ntfs_lock semaphore. + * Caller has to hold the ntfs_lock mutex. * * Return 0 on success or -ENOMEM if the allocations failed. 
*/ @@ -84,7 +84,7 @@ int allocate_compression_buffers(void) /** * free_compression_buffers - free the decompression buffers * - * Caller has to hold the ntfs_lock semaphore. + * Caller has to hold the ntfs_lock mutex. */ void free_compression_buffers(void) { diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index b0690d4c8906..9d9ed3fe371d 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1136,7 +1136,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (fpos == 1) { ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, " "inode 0x%lx, DT_DIR.", - parent_ino(filp->f_dentry)); + (unsigned long)parent_ino(filp->f_dentry)); rc = filldir(dirent, "..", 2, fpos, parent_ino(filp->f_dentry), DT_DIR); if (rc) diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 5027d3d1b3fe..f5d057e4acc2 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -943,7 +943,8 @@ rl_not_mapped_enoent: } ni->runlist.rl = rl; status.runlist_merged = 1; - ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn); + ntfs_debug("Allocated cluster, lcn 0x%llx.", + (unsigned long long)lcn); /* Map and lock the mft record and get the attribute record. */ if (!NInoAttr(ni)) base_ni = ni; @@ -1206,8 +1207,6 @@ rl_not_mapped_enoent: "attribute runlist in error code " "path. Run chkdsk to recover the " "lost cluster."); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } else /* if (success) */ { status.runlist_merged = 0; @@ -1238,8 +1237,6 @@ rl_not_mapped_enoent: ntfs_error(vol->sb, "Failed to restore attribute " "record in error code path. Run " "chkdsk to recover."); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } else /* if (success) */ { if (ntfs_mapping_pairs_build(vol, (u8*)a + @@ -1252,8 +1249,6 @@ rl_not_mapped_enoent: "mapping pairs array in error " "code path. Run chkdsk to " "recover."); - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); NVolSetErrors(vol); } flush_dcache_mft_record_page(ctx->ntfs_ino); @@ -1622,11 +1617,8 @@ err_out: unmap_mft_record(base_ni); ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error " "code %i).", err); - if (err != -ENOMEM) { + if (err != -ENOMEM) NVolSetErrors(ni->vol); - make_bad_inode(VFS_I(base_ni)); - make_bad_inode(vi); - } return err; } @@ -1801,8 +1793,6 @@ err_out: ntfs_error(vi->i_sb, "Resident attribute commit write failed " "with error %i.", err); NVolSetErrors(ni->vol); - make_bad_inode(VFS_I(base_ni)); - make_bad_inode(vi); } if (ctx) ntfs_attr_put_search_ctx(ctx); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 55263b7de9c0..4c86b7e1d1eb 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1,7 +1,7 @@ /** * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project. 
* - * Copyright (c) 2001-2005 Anton Altaparmakov + * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -19,13 +19,19 @@ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <linux/pagemap.h> #include <linux/buffer_head.h> -#include <linux/smp_lock.h> -#include <linux/quotaops.h> +#include <linux/fs.h> +#include <linux/mm.h> #include <linux/mount.h> +#include <linux/mutex.h> +#include <linux/pagemap.h> +#include <linux/quotaops.h> +#include <linux/slab.h> +#include <linux/smp_lock.h> #include "aops.h" +#include "attrib.h" +#include "bitmap.h" #include "dir.h" #include "debug.h" #include "inode.h" @@ -382,7 +388,7 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni) atomic_set(&ni->count, 1); ni->vol = NTFS_SB(sb); ntfs_init_runlist(&ni->runlist); - init_MUTEX(&ni->mrec_lock); + mutex_init(&ni->mrec_lock); ni->page = NULL; ni->page_ofs = 0; ni->attr_list_size = 0; @@ -394,7 +400,7 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni) ni->itype.index.collation_rule = 0; ni->itype.index.block_size_bits = 0; ni->itype.index.vcn_size_bits = 0; - init_MUTEX(&ni->extent_lock); + mutex_init(&ni->extent_lock); ni->nr_extents = 0; ni->ext.base_ntfs_ino = NULL; } @@ -1064,10 +1070,10 @@ skip_large_dir_stuff: if (a->non_resident) { NInoSetNonResident(ni); if (NInoCompressed(ni) || NInoSparse(ni)) { - if (a->data.non_resident.compression_unit != - 4) { + if (NInoCompressed(ni) && a->data.non_resident. + compression_unit != 4) { ntfs_error(vi->i_sb, "Found " - "nonstandard " + "non-standard " "compression unit (%u " "instead of 4). " "Cannot handle this.", @@ -1076,16 +1082,26 @@ skip_large_dir_stuff: err = -EOPNOTSUPP; goto unm_err_out; } - ni->itype.compressed.block_clusters = 1U << - a->data.non_resident. - compression_unit; - ni->itype.compressed.block_size = 1U << ( - a->data.non_resident. - compression_unit + - vol->cluster_size_bits); - ni->itype.compressed.block_size_bits = ffs( - ni->itype.compressed. - block_size) - 1; + if (a->data.non_resident.compression_unit) { + ni->itype.compressed.block_size = 1U << + (a->data.non_resident. + compression_unit + + vol->cluster_size_bits); + ni->itype.compressed.block_size_bits = + ffs(ni->itype. + compressed. + block_size) - 1; + ni->itype.compressed.block_clusters = + 1U << a->data. + non_resident. + compression_unit; + } else { + ni->itype.compressed.block_size = 0; + ni->itype.compressed.block_size_bits = + 0; + ni->itype.compressed.block_clusters = + 0; + } ni->itype.compressed.size = sle64_to_cpu( a->data.non_resident. compressed_size); @@ -1338,8 +1354,9 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) goto unm_err_out; } if (NInoCompressed(ni) || NInoSparse(ni)) { - if (a->data.non_resident.compression_unit != 4) { - ntfs_error(vi->i_sb, "Found nonstandard " + if (NInoCompressed(ni) && a->data.non_resident. + compression_unit != 4) { + ntfs_error(vi->i_sb, "Found non-standard " "compression unit (%u instead " "of 4). Cannot handle this.", a->data.non_resident. 
@@ -1347,13 +1364,22 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) err = -EOPNOTSUPP; goto unm_err_out; } - ni->itype.compressed.block_clusters = 1U << - a->data.non_resident.compression_unit; - ni->itype.compressed.block_size = 1U << ( - a->data.non_resident.compression_unit + - vol->cluster_size_bits); - ni->itype.compressed.block_size_bits = ffs( - ni->itype.compressed.block_size) - 1; + if (a->data.non_resident.compression_unit) { + ni->itype.compressed.block_size = 1U << + (a->data.non_resident. + compression_unit + + vol->cluster_size_bits); + ni->itype.compressed.block_size_bits = + ffs(ni->itype.compressed. + block_size) - 1; + ni->itype.compressed.block_clusters = 1U << + a->data.non_resident. + compression_unit; + } else { + ni->itype.compressed.block_size = 0; + ni->itype.compressed.block_size_bits = 0; + ni->itype.compressed.block_clusters = 0; + } ni->itype.compressed.size = sle64_to_cpu( a->data.non_resident.compressed_size); } @@ -1406,7 +1432,6 @@ err_out: "Run chkdsk.", err, vi->i_ino, ni->type, ni->name_len, base_vi->i_ino); make_bad_inode(vi); - make_bad_inode(base_vi); if (err != -ENOMEM) NVolSetErrors(vol); return err; @@ -1591,6 +1616,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi) "$INDEX_ALLOCATION attribute."); goto unm_err_out; } + a = ctx->attr; if (!a->non_resident) { ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is " "resident."); @@ -2823,11 +2849,8 @@ done: old_bad_out: old_size = -1; bad_out: - if (err != -ENOMEM && err != -EOPNOTSUPP) { - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); + if (err != -ENOMEM && err != -EOPNOTSUPP) NVolSetErrors(vol); - } if (err != -EOPNOTSUPP) NInoSetTruncateFailed(ni); else if (old_size >= 0) @@ -2842,11 +2865,8 @@ out: ntfs_debug("Failed. Returning error code %i.", err); return err; conv_err_out: - if (err != -ENOMEM && err != -EOPNOTSUPP) { - make_bad_inode(vi); - make_bad_inode(VFS_I(base_ni)); + if (err != -ENOMEM && err != -EOPNOTSUPP) NVolSetErrors(vol); - } if (err != -EOPNOTSUPP) NInoSetTruncateFailed(ni); else @@ -3044,15 +3064,18 @@ int ntfs_write_inode(struct inode *vi, int sync) * record will be cleaned and written out to disk below, i.e. before * this function returns. */ - if (modified && !NInoTestSetDirty(ctx->ntfs_ino)) - mark_ntfs_record_dirty(ctx->ntfs_ino->page, - ctx->ntfs_ino->page_ofs); + if (modified) { + flush_dcache_mft_record_page(ctx->ntfs_ino); + if (!NInoTestSetDirty(ctx->ntfs_ino)) + mark_ntfs_record_dirty(ctx->ntfs_ino->page, + ctx->ntfs_ino->page_ofs); + } ntfs_attr_put_search_ctx(ctx); /* Now the access times are updated, write the base mft record. */ if (NInoDirty(ni)) err = write_mft_record(ni, m, sync); /* Write all attached extent mft records. */ - down(&ni->extent_lock); + mutex_lock(&ni->extent_lock); if (ni->nr_extents > 0) { ntfs_inode **extent_nis = ni->ext.extent_ntfs_inos; int i; @@ -3079,7 +3102,7 @@ int ntfs_write_inode(struct inode *vi, int sync) } } } - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); unmap_mft_record(ni); if (unlikely(err)) goto err_out; @@ -3094,9 +3117,7 @@ err_out: "retries later."); mark_inode_dirty(vi); } else { - ntfs_error(vi->i_sb, "Failed (error code %i): Marking inode " - "as bad. 
You should run chkdsk.", -err); - make_bad_inode(vi); + ntfs_error(vi->i_sb, "Failed (error %i): Run chkdsk.", -err); NVolSetErrors(ni->vol); } return err; diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h index 3de5c0231966..f088291e017c 100644 --- a/fs/ntfs/inode.h +++ b/fs/ntfs/inode.h @@ -24,12 +24,13 @@ #ifndef _LINUX_NTFS_INODE_H #define _LINUX_NTFS_INODE_H -#include <linux/mm.h> +#include <asm/atomic.h> + #include <linux/fs.h> -#include <linux/seq_file.h> #include <linux/list.h> -#include <asm/atomic.h> -#include <asm/semaphore.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> #include "layout.h" #include "volume.h" @@ -81,7 +82,7 @@ struct _ntfs_inode { * The following fields are only valid for real inodes and extent * inodes. */ - struct semaphore mrec_lock; /* Lock for serializing access to the + struct mutex mrec_lock; /* Lock for serializing access to the mft record belonging to this inode. */ struct page *page; /* The page containing the mft record of the inode. This should only be touched by the @@ -119,7 +120,7 @@ struct _ntfs_inode { u8 block_clusters; /* Number of clusters per cb. */ } compressed; } itype; - struct semaphore extent_lock; /* Lock for accessing/modifying the + struct mutex extent_lock; /* Lock for accessing/modifying the below . */ s32 nr_extents; /* For a base mft record, the number of attached extent inodes (0 if none), for extent records and for fake diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index bb408d4dcbb0..d34b93cb8b48 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h @@ -769,7 +769,7 @@ typedef struct { compressed. (This effectively limits the compression unit size to be a power of two clusters.) WinNT4 only uses a value of 4. - Sparse files also have this set to 4. */ + Sparse files have this set to 0 on XPSP2. */ /* 35*/ u8 reserved[5]; /* Align to 8-byte boundary. */ /* The sizes below are only used when lowest_vcn is zero, as otherwise it would be difficult to keep them up-to-date.*/ @@ -801,13 +801,16 @@ typedef struct { typedef ATTR_RECORD ATTR_REC; /* - * File attribute flags (32-bit). + * File attribute flags (32-bit) appearing in the file_attributes fields of the + * STANDARD_INFORMATION attribute of MFT_RECORDs and the FILENAME_ATTR + * attributes of MFT_RECORDs and directory index entries. + * + * All of the below flags appear in the directory index entries but only some + * appear in the STANDARD_INFORMATION attribute whilst only some others appear + * in the FILENAME_ATTR attribute of MFT_RECORDs. Unless otherwise stated the + * flags appear in all of the above. */ enum { - /* - * The following flags are only present in the STANDARD_INFORMATION - * attribute (in the field file_attributes). - */ FILE_ATTR_READONLY = const_cpu_to_le32(0x00000001), FILE_ATTR_HIDDEN = const_cpu_to_le32(0x00000002), FILE_ATTR_SYSTEM = const_cpu_to_le32(0x00000004), @@ -839,18 +842,14 @@ enum { F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest. This mask is used to to obtain all flags that are valid for setting. */ /* - * The following flag is only present in the FILE_NAME attribute (in - * the field file_attributes). + * The flag FILE_ATTR_DUP_FILENAME_INDEX_PRESENT is present in all + * FILENAME_ATTR attributes but not in the STANDARD_INFORMATION + * attribute of an mft record. */ FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT = const_cpu_to_le32(0x10000000), /* Note, this is a copy of the corresponding bit from the mft record, telling us whether this is a directory or not, i.e. 
whether it has an index root attribute or not. */ - /* - * The following flag is present both in the STANDARD_INFORMATION - * attribute and in the FILE_NAME attribute (in the field - * file_attributes). - */ FILE_ATTR_DUP_VIEW_INDEX_PRESENT = const_cpu_to_le32(0x20000000), /* Note, this is a copy of the corresponding bit from the mft record, telling us whether this file has a view index present (eg. object id @@ -891,7 +890,7 @@ typedef struct { Windows this is only updated when accessed if some time delta has passed since the last update. Also, - last access times updates can be + last access time updates can be disabled altogether for speed. */ /* 32*/ FILE_ATTR_FLAGS file_attributes; /* Flags describing the file. */ /* 36*/ union { @@ -1076,16 +1075,21 @@ typedef struct { /* 20*/ sle64 last_access_time; /* Time this mft record was last accessed. */ /* 28*/ sle64 allocated_size; /* Byte size of on-disk allocated space - for the data attribute. So for - normal $DATA, this is the + for the unnamed data attribute. So + for normal $DATA, this is the allocated_size from the unnamed $DATA attribute and for compressed and/or sparse $DATA, this is the compressed_size from the unnamed - $DATA attribute. NOTE: This is a - multiple of the cluster size. */ -/* 30*/ sle64 data_size; /* Byte size of actual data in data - attribute. */ + $DATA attribute. For a directory or + other inode without an unnamed $DATA + attribute, this is always 0. NOTE: + This is a multiple of the cluster + size. */ +/* 30*/ sle64 data_size; /* Byte size of actual data in unnamed + data attribute. For a directory or + other inode without an unnamed $DATA + attribute, this is always 0. */ /* 38*/ FILE_ATTR_FLAGS file_attributes; /* Flags describing the file. */ /* 3c*/ union { /* 3c*/ struct { diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 6499aafc2258..4e72bc7afdf9 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -93,6 +93,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) "Run chkdsk.", ni->mft_no); ntfs_unmap_page(page); page = ERR_PTR(-EIO); + NVolSetErrors(vol); } err_out: ni->page = NULL; @@ -104,8 +105,8 @@ err_out: * map_mft_record - map, pin and lock an mft record * @ni: ntfs inode whose MFT record to map * - * First, take the mrec_lock semaphore. We might now be sleeping, while waiting - * for the semaphore if it was already locked by someone else. + * First, take the mrec_lock mutex. We might now be sleeping, while waiting + * for the mutex if it was already locked by someone else. * * The page of the record is mapped using map_mft_record_page() before being * returned to the caller. @@ -135,9 +136,9 @@ err_out: * So that code will end up having to own the mrec_lock of all mft * records/inodes present in the page before I/O can proceed. In that case we * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be - * accessing anything without owning the mrec_lock semaphore. But we do need - * to use them because of the read_cache_page() invocation and the code becomes - * so much simpler this way that it is well worth it. + * accessing anything without owning the mrec_lock mutex. But we do need to + * use them because of the read_cache_page() invocation and the code becomes so + * much simpler this way that it is well worth it. * * The mft record is now ours and we return a pointer to it. 
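A caller-side sketch of the map/unmap pairing this comment documents may also help; it is simplified, the modification steps are optional, and it is not taken verbatim from the driver:

	MFT_RECORD *m;

	m = map_mft_record(ni);			/* takes ni->mrec_lock */
	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... read or modify the mft record through m ... */
	flush_dcache_mft_record_page(ni);	/* only if modified */
	mark_mft_record_dirty(ni);		/* only if modified */
	unmap_mft_record(ni);			/* releases ni->mrec_lock */
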
You need to check * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return @@ -160,13 +161,13 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni) atomic_inc(&ni->count); /* Serialize access to this mft record. */ - down(&ni->mrec_lock); + mutex_lock(&ni->mrec_lock); m = map_mft_record_page(ni); if (likely(!IS_ERR(m))) return m; - up(&ni->mrec_lock); + mutex_unlock(&ni->mrec_lock); atomic_dec(&ni->count); ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m)); return m; @@ -217,7 +218,7 @@ void unmap_mft_record(ntfs_inode *ni) ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no); unmap_mft_record_page(ni); - up(&ni->mrec_lock); + mutex_unlock(&ni->mrec_lock); atomic_dec(&ni->count); /* * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to @@ -261,7 +262,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref, * in which case just return it. If not found, add it to the base * inode before returning it. */ - down(&base_ni->extent_lock); + mutex_lock(&base_ni->extent_lock); if (base_ni->nr_extents > 0) { extent_nis = base_ni->ext.extent_ntfs_inos; for (i = 0; i < base_ni->nr_extents; i++) { @@ -274,7 +275,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref, } } if (likely(ni != NULL)) { - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); atomic_dec(&base_ni->count); /* We found the record; just have to map and return it. */ m = map_mft_record(ni); @@ -301,7 +302,7 @@ map_err_out: /* Record wasn't there. Get a new ntfs inode and initialize it. */ ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no); if (unlikely(!ni)) { - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); atomic_dec(&base_ni->count); return ERR_PTR(-ENOMEM); } @@ -312,7 +313,7 @@ map_err_out: /* Now map the record. */ m = map_mft_record(ni); if (IS_ERR(m)) { - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); atomic_dec(&base_ni->count); ntfs_clear_extent_inode(ni); goto map_err_out; @@ -347,14 +348,14 @@ map_err_out: base_ni->ext.extent_ntfs_inos = tmp; } base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni; - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); atomic_dec(&base_ni->count); ntfs_debug("Done 2."); *ntfs_ino = ni; return m; unm_err_out: unmap_mft_record(ni); - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); atomic_dec(&base_ni->count); /* * If the extent inode was not attached to the base inode we need to @@ -399,12 +400,12 @@ void __mark_mft_record_dirty(ntfs_inode *ni) BUG_ON(NInoAttr(ni)); mark_ntfs_record_dirty(ni->page, ni->page_ofs); /* Determine the base vfs inode and mark it dirty, too. */ - down(&ni->extent_lock); + mutex_lock(&ni->extent_lock); if (likely(ni->nr_extents >= 0)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); __mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC); } @@ -650,10 +651,7 @@ err_out: * fs/ntfs/aops.c::mark_ntfs_record_dirty(). * * On success, clean the mft record and return 0. On error, leave the mft - * record dirty and return -errno. The caller should call make_bad_inode() on - * the base inode to ensure no more access happens to this inode. We do not do - * it here as the caller may want to finish writing other extent mft records - * first to minimize on-disk metadata inconsistencies. + * record dirty and return -errno. * * NOTE: We always perform synchronous i/o and ignore the @sync parameter. 
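One subtlety in the trylock conversions that follow: down_trylock() and mutex_trylock() report success with opposite values, so every converted call site gains a negation. An illustrative fragment, not a hunk from this patch:

	/* Semaphores: down_trylock() returns 0 when the lock was taken. */
	if (down_trylock(&ni->mrec_lock))	/* old code */
		return;				/* contended, back off */

	/* Mutexes: mutex_trylock() returns 1 when the lock was taken. */
	if (!mutex_trylock(&ni->mrec_lock))	/* new code */
		return;				/* contended, back off */
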
* However, if the mft record has a counterpart in the mft mirror and @sync is @@ -983,7 +981,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, } ntfs_debug("Inode 0x%lx is not dirty.", mft_no); /* The inode is not dirty, try to take the mft record lock. */ - if (unlikely(down_trylock(&ni->mrec_lock))) { + if (unlikely(!mutex_trylock(&ni->mrec_lock))) { ntfs_debug("Mft record 0x%lx is already locked, do " "not write it.", mft_no); atomic_dec(&ni->count); @@ -1043,13 +1041,13 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, * corresponding to this extent mft record attached. */ ni = NTFS_I(vi); - down(&ni->extent_lock); + mutex_lock(&ni->extent_lock); if (ni->nr_extents <= 0) { /* * The base inode has no attached extent inodes, write this * extent mft record. */ - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); iput(vi); ntfs_debug("Base inode 0x%lx has no attached extent inodes, " "write the extent record.", na.mft_no); @@ -1072,7 +1070,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, * extent mft record. */ if (!eni) { - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); iput(vi); ntfs_debug("Extent inode 0x%lx is not attached to its base " "inode 0x%lx, write the extent record.", @@ -1083,12 +1081,12 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, mft_no, na.mft_no); /* Take a reference to the extent ntfs inode. */ atomic_inc(&eni->count); - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); /* * Found the extent inode coresponding to this extent mft record. * Try to take the mft record lock. */ - if (unlikely(down_trylock(&eni->mrec_lock))) { + if (unlikely(!mutex_trylock(&eni->mrec_lock))) { atomic_dec(&eni->count); iput(vi); ntfs_debug("Extent mft record 0x%lx is already locked, do " @@ -2711,7 +2709,7 @@ mft_rec_already_initialized: * have its page mapped and it is very easy to do. */ atomic_inc(&ni->count); - down(&ni->mrec_lock); + mutex_lock(&ni->mrec_lock); ni->page = page; ni->page_ofs = ofs; /* @@ -2798,22 +2796,22 @@ int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m) BUG_ON(NInoAttr(ni)); BUG_ON(ni->nr_extents != -1); - down(&ni->extent_lock); + mutex_lock(&ni->extent_lock); base_ni = ni->ext.base_ntfs_ino; - up(&ni->extent_lock); + mutex_unlock(&ni->extent_lock); BUG_ON(base_ni->nr_extents <= 0); ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n", mft_no, base_ni->mft_no); - down(&base_ni->extent_lock); + mutex_lock(&base_ni->extent_lock); /* Make sure we are holding the only reference to the extent inode. */ if (atomic_read(&ni->count) > 2) { ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, " "not freeing.", base_ni->mft_no); - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); return -EBUSY; } @@ -2831,7 +2829,7 @@ int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m) break; } - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); if (unlikely(err)) { ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to " @@ -2890,7 +2888,7 @@ rollback_error: return 0; rollback: /* Rollback what we did... 
*/ - down(&base_ni->extent_lock); + mutex_lock(&base_ni->extent_lock); extent_nis = base_ni->ext.extent_ntfs_inos; if (!(base_ni->nr_extents & 3)) { int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*); @@ -2899,7 +2897,7 @@ rollback: if (unlikely(!extent_nis)) { ntfs_error(vol->sb, "Failed to allocate internal " "buffer during rollback.%s", es); - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); NVolSetErrors(vol); goto rollback_error; } @@ -2914,7 +2912,7 @@ rollback: m->flags |= MFT_RECORD_IN_USE; m->sequence_number = old_seq_no; extent_nis[base_ni->nr_extents++] = ni; - up(&base_ni->extent_lock); + mutex_unlock(&base_ni->extent_lock); mark_mft_record_dirty(ni); return err; } diff --git a/fs/ntfs/mft.h b/fs/ntfs/mft.h index 407de2cef1d6..639cd1bab08b 100644 --- a/fs/ntfs/mft.h +++ b/fs/ntfs/mft.h @@ -97,10 +97,7 @@ extern int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync); * uptodate. * * On success, clean the mft record and return 0. On error, leave the mft - * record dirty and return -errno. The caller should call make_bad_inode() on - * the base inode to ensure no more access happens to this inode. We do not do - * it here as the caller may want to finish writing other extent mft records - * first to minimize on-disk metadata inconsistencies. + * record dirty and return -errno. */ static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync) { diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c index 5ea9eb93af62..eddb2247cec5 100644 --- a/fs/ntfs/namei.c +++ b/fs/ntfs/namei.c @@ -2,7 +2,7 @@ * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS * project. * - * Copyright (c) 2001-2004 Anton Altaparmakov + * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -115,7 +115,9 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, &uname); if (uname_len < 0) { - ntfs_error(vol->sb, "Failed to convert name to Unicode."); + if (uname_len != -ENAMETOOLONG) + ntfs_error(vol->sb, "Failed to convert name to " + "Unicode."); return ERR_PTR(uname_len); } mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, @@ -157,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, /* Return the error code. */ return (struct dentry *)dent_inode; } - /* It is guaranteed that name is no longer allocated at this point. */ + /* It is guaranteed that @name is no longer allocated at this point. */ if (MREF_ERR(mref) == -ENOENT) { ntfs_debug("Entry was not found, adding negative dentry."); /* The dcache will handle negative entries. */ @@ -168,7 +170,6 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error " "code %i.", -MREF_ERR(mref)); return ERR_PTR(MREF_ERR(mref)); - // TODO: Consider moving this lot to a separate function! 
(AIA) handle_name: { diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h index 653d2a5c4899..0624c8ef4d9c 100644 --- a/fs/ntfs/ntfs.h +++ b/fs/ntfs/ntfs.h @@ -91,7 +91,7 @@ extern void free_compression_buffers(void); /* From fs/ntfs/super.c */ #define default_upcase_len 0x10000 -extern struct semaphore ntfs_lock; +extern struct mutex ntfs_lock; typedef struct { int val; diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c index 061b5ff6b73c..eb52b801512b 100644 --- a/fs/ntfs/runlist.c +++ b/fs/ntfs/runlist.c @@ -381,6 +381,7 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, static inline runlist_element *ntfs_rl_replace(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { + signed delta; BOOL left = FALSE; /* Left end of @src needs merging. */ BOOL right = FALSE; /* Right end of @src needs merging. */ int tail; /* Start of tail of @dst. */ @@ -396,11 +397,14 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, left = ntfs_are_rl_mergeable(dst + loc - 1, src); /* * Allocate some space. We will need less if the left, right, or both - * ends get merged. + * ends get merged. The -1 accounts for the run being replaced. */ - dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); - if (IS_ERR(dst)) - return dst; + delta = ssize - 1 - left - right; + if (delta > 0) { + dst = ntfs_rl_realloc(dst, dsize, dsize + delta); + if (IS_ERR(dst)) + return dst; + } /* * We are guaranteed to succeed from here so can start modifying the * original runlists. diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 368a8ec10668..7646b5059389 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -1099,26 +1099,38 @@ static BOOL check_mft_mirror(ntfs_volume *vol) kmirr = page_address(mirr_page); ++index; } - /* Make sure the record is ok. */ - if (ntfs_is_baad_recordp((le32*)kmft)) { - ntfs_error(sb, "Incomplete multi sector transfer " - "detected in mft record %i.", i); + /* Do not check the record if it is not in use. */ + if (((MFT_RECORD*)kmft)->flags & MFT_RECORD_IN_USE) { + /* Make sure the record is ok. */ + if (ntfs_is_baad_recordp((le32*)kmft)) { + ntfs_error(sb, "Incomplete multi sector " + "transfer detected in mft " + "record %i.", i); mm_unmap_out: - ntfs_unmap_page(mirr_page); + ntfs_unmap_page(mirr_page); mft_unmap_out: - ntfs_unmap_page(mft_page); - return FALSE; + ntfs_unmap_page(mft_page); + return FALSE; + } } - if (ntfs_is_baad_recordp((le32*)kmirr)) { - ntfs_error(sb, "Incomplete multi sector transfer " - "detected in mft mirror record %i.", i); - goto mm_unmap_out; + /* Do not check the mirror record if it is not in use. */ + if (((MFT_RECORD*)kmirr)->flags & MFT_RECORD_IN_USE) { + if (ntfs_is_baad_recordp((le32*)kmirr)) { + ntfs_error(sb, "Incomplete multi sector " + "transfer detected in mft " + "mirror record %i.", i); + goto mm_unmap_out; + } } /* Get the amount of data in the current record. */ bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use); - if (!bytes || bytes > vol->mft_record_size) { + if (bytes < sizeof(MFT_RECORD_OLD) || + bytes > vol->mft_record_size || + ntfs_is_baad_recordp((le32*)kmft)) { bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use); - if (!bytes || bytes > vol->mft_record_size) + if (bytes < sizeof(MFT_RECORD_OLD) || + bytes > vol->mft_record_size || + ntfs_is_baad_recordp((le32*)kmirr)) bytes = vol->mft_record_size; } /* Compare the two records. 
*/ @@ -1665,11 +1677,11 @@ read_partial_upcase_page: ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).", i_size, 64 * 1024 * sizeof(ntfschar)); iput(ino); - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (!default_upcase) { ntfs_debug("Using volume specified $UpCase since default is " "not present."); - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); return TRUE; } max = default_upcase_len; @@ -1683,12 +1695,12 @@ read_partial_upcase_page: vol->upcase = default_upcase; vol->upcase_len = max; ntfs_nr_upcase_users++; - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); ntfs_debug("Volume specified $UpCase matches default. Using " "default."); return TRUE; } - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); ntfs_debug("Using volume specified $UpCase since it does not match " "the default."); return TRUE; @@ -1697,17 +1709,17 @@ iput_upcase_failed: ntfs_free(vol->upcase); vol->upcase = NULL; upcase_failed: - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (default_upcase) { vol->upcase = default_upcase; vol->upcase_len = default_upcase_len; ntfs_nr_upcase_users++; - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); ntfs_error(sb, "Failed to load $UpCase from the volume. Using " "default."); return TRUE; } - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); ntfs_error(sb, "Failed to initialize upcase table."); return FALSE; } @@ -2183,12 +2195,12 @@ iput_attrdef_err_out: iput_upcase_err_out: #endif /* NTFS_RW */ vol->upcase_len = 0; - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (vol->upcase == default_upcase) { ntfs_nr_upcase_users--; vol->upcase = NULL; } - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); if (vol->upcase) { ntfs_free(vol->upcase); vol->upcase = NULL; @@ -2393,7 +2405,7 @@ static void ntfs_put_super(struct super_block *sb) * Destroy the global default upcase table if necessary. Also decrease * the number of upcase users if we are a user. */ - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (vol->upcase == default_upcase) { ntfs_nr_upcase_users--; vol->upcase = NULL; @@ -2404,7 +2416,7 @@ static void ntfs_put_super(struct super_block *sb) } if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users) free_compression_buffers(); - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); if (vol->upcase) { ntfs_free(vol->upcase); vol->upcase = NULL; @@ -2878,7 +2890,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) ntfs_error(sb, "Failed to load essential metadata."); goto iput_tmp_ino_err_out_now; } - down(&ntfs_lock); + mutex_lock(&ntfs_lock); /* * The current mount is a compression user if the cluster size is * less than or equal 4kiB. @@ -2889,7 +2901,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) ntfs_error(NULL, "Failed to allocate buffers " "for compression engine."); ntfs_nr_compression_users--; - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); goto iput_tmp_ino_err_out_now; } } @@ -2901,7 +2913,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) if (!default_upcase) default_upcase = generate_default_upcase(); ntfs_nr_upcase_users++; - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); /* * From now on, ignore @silent parameter. If we fail below this line, * it will be due to a corrupt fs or a system error, so we report it. @@ -2919,12 +2931,12 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) atomic_inc(&vol->root_ino->i_count); ntfs_debug("Exiting, status successful."); /* Release the default upcase if it has no users. 
*/ - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (!--ntfs_nr_upcase_users && default_upcase) { ntfs_free(default_upcase); default_upcase = NULL; } - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); sb->s_export_op = &ntfs_export_ops; lock_kernel(); return 0; @@ -2992,12 +3004,12 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) vol->attrdef = NULL; } vol->upcase_len = 0; - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (vol->upcase == default_upcase) { ntfs_nr_upcase_users--; vol->upcase = NULL; } - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); if (vol->upcase) { ntfs_free(vol->upcase); vol->upcase = NULL; @@ -3012,14 +3024,14 @@ unl_upcase_iput_tmp_ino_err_out_now: * Decrease the number of upcase users and destroy the global default * upcase table if necessary. */ - down(&ntfs_lock); + mutex_lock(&ntfs_lock); if (!--ntfs_nr_upcase_users && default_upcase) { ntfs_free(default_upcase); default_upcase = NULL; } if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users) free_compression_buffers(); - up(&ntfs_lock); + mutex_unlock(&ntfs_lock); iput_tmp_ino_err_out_now: iput(tmp_ino); if (vol->mft_ino && vol->mft_ino != tmp_ino) @@ -3078,8 +3090,8 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep, struct kmem_cache *ntfs_attr_ctx_cache; struct kmem_cache *ntfs_index_ctx_cache; -/* Driver wide semaphore. */ -DECLARE_MUTEX(ntfs_lock); +/* Driver wide mutex. */ +DEFINE_MUTEX(ntfs_lock); static struct super_block *ntfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) @@ -3234,7 +3246,7 @@ static void __exit exit_ntfs_fs(void) } MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>"); -MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2005 Anton Altaparmakov"); +MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2006 Anton Altaparmakov"); MODULE_VERSION(NTFS_VERSION); MODULE_LICENSE("GPL"); #ifdef DEBUG diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c index 0ea887fc859c..b123c0fa6bf6 100644 --- a/fs/ntfs/unistr.c +++ b/fs/ntfs/unistr.c @@ -1,7 +1,7 @@ /* * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2005 Anton Altaparmakov + * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -19,6 +19,8 @@ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/slab.h> + #include "types.h" #include "debug.h" #include "ntfs.h" @@ -242,7 +244,7 @@ int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1, * map dictates, into a little endian, 2-byte Unicode string. * * This function allocates the string and the caller is responsible for - * calling kmem_cache_free(ntfs_name_cache, @outs); when finished with it. + * calling kmem_cache_free(ntfs_name_cache, *@outs); when finished with it. * * On success the function returns the number of Unicode characters written to * the output string *@outs (>= 0), not counting the terminating Unicode NULL @@ -262,37 +264,48 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins, wchar_t wc; int i, o, wc_len; - /* We don't trust outside sources. */ - if (ins) { + /* We do not trust outside sources. 
*/ + if (likely(ins)) { ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS); - if (ucs) { + if (likely(ucs)) { for (i = o = 0; i < ins_len; i += wc_len) { wc_len = nls->char2uni(ins + i, ins_len - i, &wc); - if (wc_len >= 0) { - if (wc) { + if (likely(wc_len >= 0 && + o < NTFS_MAX_NAME_LEN)) { + if (likely(wc)) { ucs[o++] = cpu_to_le16(wc); continue; - } /* else (!wc) */ + } /* else if (!wc) */ break; - } /* else (wc_len < 0) */ - goto conversion_err; + } /* else if (wc_len < 0 || + o >= NTFS_MAX_NAME_LEN) */ + goto name_err; } ucs[o] = 0; *outs = ucs; return o; - } /* else (!ucs) */ - ntfs_error(vol->sb, "Failed to allocate name from " - "ntfs_name_cache!"); + } /* else if (!ucs) */ + ntfs_error(vol->sb, "Failed to allocate buffer for converted " + "name from ntfs_name_cache."); return -ENOMEM; - } /* else (!ins) */ - ntfs_error(NULL, "Received NULL pointer."); + } /* else if (!ins) */ + ntfs_error(vol->sb, "Received NULL pointer."); return -EINVAL; -conversion_err: - ntfs_error(vol->sb, "Name using character set %s contains characters " - "that cannot be converted to Unicode.", nls->charset); +name_err: kmem_cache_free(ntfs_name_cache, ucs); - return -EILSEQ; + if (wc_len < 0) { + ntfs_error(vol->sb, "Name using character set %s contains " + "characters that cannot be converted to " + "Unicode.", nls->charset); + i = -EILSEQ; + } else /* if (o >= NTFS_MAX_NAME_LEN) */ { + ntfs_error(vol->sb, "Name is too long (maximum length for a " + "name on NTFS is %d Unicode characters).", + NTFS_MAX_NAME_LEN); + i = -ENAMETOOLONG; + } + return i; } /** diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 8dd3aafec499..09e1c57a86a0 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void) ocfs2_lock_cache = kmem_cache_create("ocfs2_lock", sizeof(struct ocfs2_journal_lock), 0, - SLAB_NO_REAP|SLAB_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN, NULL, NULL); if (!ocfs2_lock_cache) return -ENOMEM; diff --git a/fs/open.c b/fs/open.c index 70e0230d8e77..1091dadd6c38 100644 --- a/fs/open.c +++ b/fs/open.c @@ -973,7 +973,7 @@ repeat: fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fdset, - fdt->next_fd); + files->next_fd); /* * N.B. For clone tasks sharing a files structure, this test @@ -998,7 +998,7 @@ repeat: FD_SET(fd, fdt->open_fds); FD_CLR(fd, fdt->close_on_exec); - fdt->next_fd = fd + 1; + files->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { @@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __FD_CLR(fd, fdt->open_fds); - if (fd < fdt->next_fd) - fdt->next_fd = fd; + if (fd < files->next_fd) + files->next_fd = fd; } void fastcall put_unused_fd(unsigned int fd) diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 1d24fead51a6..826c131994c3 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) case BLK_HDR: info->state = BLK_LIST; (*pos)++; - break; + /*fallthrough*/ case BLK_LIST: if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { /* diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c index b471315e24ef..c33963fded9e 100644 --- a/fs/qnx4/file.c +++ b/fs/qnx4/file.c @@ -12,10 +12,7 @@ * 27-06-1998 by Frank Denis : file overwriting.
*/ -#include <linux/config.h> -#include <linux/types.h> #include <linux/fs.h> -#include <linux/time.h> #include <linux/qnx4_fs.h> /* diff --git a/fs/quota.c b/fs/quota.c index ba9e0bf32f67..d6a2be826e29 100644 --- a/fs/quota.c +++ b/fs/quota.c @@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type) /* Now when everything is written we can discard the pagecache so * that userspace sees the changes. We need i_mutex and so we could - * not do it inside dqonoff_sem. Moreover we need to be carefull + * not do it inside dqonoff_mutex. Moreover we need to be careful * about races with quotaoff() (that is the reason why we have own * reference to inode). */ - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { discard[cnt] = NULL; if (type != -1 && cnt != type) @@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); } - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (discard[cnt]) { mutex_lock(&discard[cnt]->i_mutex); diff --git a/fs/quota_v2.c b/fs/quota_v2.c index b4199ec3ece4..c519a583e681 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot) ssize_t ret; struct v2_disk_dqblk ddquot, empty; - /* dq_off is guarded by dqio_sem */ + /* dq_off is guarded by dqio_mutex */ if (!dquot->dq_off) if ((ret = dq_insert_tree(dquot)) < 0) { printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 2115383dcc8d..6ada2095b9ac 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -24,18 +24,7 @@ * caches is sufficient. */ -#include <linux/module.h> #include <linux/fs.h> -#include <linux/pagemap.h> -#include <linux/highmem.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/smp_lock.h> -#include <linux/backing-dev.h> -#include <linux/ramfs.h> - -#include <asm/uaccess.h> -#include "internal.h" struct address_space_operations ramfs_aops = { .readpage = simple_readpage, diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 3f810acd0bfa..b1ca234068f6 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) xpages = 1UL << order; npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; - for (loop = 0; loop < npages; loop++) - set_page_count(pages + loop, 1); + split_page(pages, order); /* trim off any pages we don't actually require */ for (loop = npages; loop < xpages; loop++) diff --git a/fs/relayfs/Makefile b/fs/relayfs/Makefile deleted file mode 100644 index e76e182cdb38..000000000000 --- a/fs/relayfs/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -obj-$(CONFIG_RELAYFS_FS) += relayfs.o - -relayfs-y := relay.o inode.o buffers.o - diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c deleted file mode 100644 index 10187812771e..000000000000 --- a/fs/relayfs/buffers.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * RelayFS buffer management code. - * - * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp - * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) - * - * This file is released under the GPL.
- */ - -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> -#include <linux/relayfs_fs.h> -#include "relay.h" -#include "buffers.h" - -/* - * close() vm_op implementation for relayfs file mapping. - */ -static void relay_file_mmap_close(struct vm_area_struct *vma) -{ - struct rchan_buf *buf = vma->vm_private_data; - buf->chan->cb->buf_unmapped(buf, vma->vm_file); -} - -/* - * nopage() vm_op implementation for relayfs file mapping. - */ -static struct page *relay_buf_nopage(struct vm_area_struct *vma, - unsigned long address, - int *type) -{ - struct page *page; - struct rchan_buf *buf = vma->vm_private_data; - unsigned long offset = address - vma->vm_start; - - if (address > vma->vm_end) - return NOPAGE_SIGBUS; /* Disallow mremap */ - if (!buf) - return NOPAGE_OOM; - - page = vmalloc_to_page(buf->start + offset); - if (!page) - return NOPAGE_OOM; - get_page(page); - - if (type) - *type = VM_FAULT_MINOR; - - return page; -} - -/* - * vm_ops for relay file mappings. - */ -static struct vm_operations_struct relay_file_mmap_ops = { - .nopage = relay_buf_nopage, - .close = relay_file_mmap_close, -}; - -/** - * relay_mmap_buf: - mmap channel buffer to process address space - * @buf: relay channel buffer - * @vma: vm_area_struct describing memory to be mapped - * - * Returns 0 if ok, negative on error - * - * Caller should already have grabbed mmap_sem. - */ -int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) -{ - unsigned long length = vma->vm_end - vma->vm_start; - struct file *filp = vma->vm_file; - - if (!buf) - return -EBADF; - - if (length != (unsigned long)buf->chan->alloc_size) - return -EINVAL; - - vma->vm_ops = &relay_file_mmap_ops; - vma->vm_private_data = buf; - buf->chan->cb->buf_mapped(buf, filp); - - return 0; -} - -/** - * relay_alloc_buf - allocate a channel buffer - * @buf: the buffer struct - * @size: total size of the buffer - * - * Returns a pointer to the resulting buffer, NULL if unsuccessful - */ -static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size) -{ - void *mem; - unsigned int i, j, n_pages; - - size = PAGE_ALIGN(size); - n_pages = size >> PAGE_SHIFT; - - buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); - if (!buf->page_array) - return NULL; - - for (i = 0; i < n_pages; i++) { - buf->page_array[i] = alloc_page(GFP_KERNEL); - if (unlikely(!buf->page_array[i])) - goto depopulate; - } - mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); - if (!mem) - goto depopulate; - - memset(mem, 0, size); - buf->page_count = n_pages; - return mem; - -depopulate: - for (j = 0; j < i; j++) - __free_page(buf->page_array[j]); - kfree(buf->page_array); - return NULL; -} - -/** - * relay_create_buf - allocate and initialize a channel buffer - * @alloc_size: size of the buffer to allocate - * @n_subbufs: number of sub-buffers in the channel - * - * Returns channel buffer if successful, NULL otherwise - */ -struct rchan_buf *relay_create_buf(struct rchan *chan) -{ - struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL); - if (!buf) - return NULL; - - buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); - if (!buf->padding) - goto free_buf; - - buf->start = relay_alloc_buf(buf, chan->alloc_size); - if (!buf->start) - goto free_buf; - - buf->chan = chan; - kref_get(&buf->chan->kref); - return buf; - -free_buf: - kfree(buf->padding); - kfree(buf); - return NULL; -} - -/** - * relay_destroy_buf - destroy an rchan_buf struct and associated buffer - * @buf: the buffer 
struct - */ -void relay_destroy_buf(struct rchan_buf *buf) -{ - struct rchan *chan = buf->chan; - unsigned int i; - - if (likely(buf->start)) { - vunmap(buf->start); - for (i = 0; i < buf->page_count; i++) - __free_page(buf->page_array[i]); - kfree(buf->page_array); - } - kfree(buf->padding); - kfree(buf); - kref_put(&chan->kref, relay_destroy_channel); -} - -/** - * relay_remove_buf - remove a channel buffer - * - * Removes the file from the relayfs fileystem, which also frees the - * rchan_buf_struct and the channel buffer. Should only be called from - * kref_put(). - */ -void relay_remove_buf(struct kref *kref) -{ - struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref); - buf->chan->cb->remove_buf_file(buf->dentry); - relay_destroy_buf(buf); -} diff --git a/fs/relayfs/buffers.h b/fs/relayfs/buffers.h deleted file mode 100644 index 37a12493f641..000000000000 --- a/fs/relayfs/buffers.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _BUFFERS_H -#define _BUFFERS_H - -/* This inspired by rtai/shmem */ -#define FIX_SIZE(x) (((x) - 1) & PAGE_MASK) + PAGE_SIZE - -extern int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma); -extern struct rchan_buf *relay_create_buf(struct rchan *chan); -extern void relay_destroy_buf(struct rchan_buf *buf); -extern void relay_remove_buf(struct kref *kref); - -#endif/* _BUFFERS_H */ diff --git a/fs/relayfs/inode.c b/fs/relayfs/inode.c deleted file mode 100644 index 383523011aad..000000000000 --- a/fs/relayfs/inode.c +++ /dev/null @@ -1,581 +0,0 @@ -/* - * VFS-related code for RelayFS, a high-speed data relay filesystem. - * - * Copyright (C) 2003-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp - * Copyright (C) 2003-2005 - Karim Yaghmour <karim@opersys.com> - * - * Based on ramfs, Copyright (C) 2002 - Linus Torvalds - * - * This file is released under the GPL. - */ - -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/mount.h> -#include <linux/pagemap.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/backing-dev.h> -#include <linux/namei.h> -#include <linux/poll.h> -#include <linux/relayfs_fs.h> -#include "relay.h" -#include "buffers.h" - -#define RELAYFS_MAGIC 0xF0B4A981 - -static struct vfsmount * relayfs_mount; -static int relayfs_mount_count; - -static struct backing_dev_info relayfs_backing_dev_info = { - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, -}; - -static struct inode *relayfs_get_inode(struct super_block *sb, - int mode, - struct file_operations *fops, - void *data) -{ - struct inode *inode; - - inode = new_inode(sb); - if (!inode) - return NULL; - - inode->i_mode = mode; - inode->i_uid = 0; - inode->i_gid = 0; - inode->i_blksize = PAGE_CACHE_SIZE; - inode->i_blocks = 0; - inode->i_mapping->backing_dev_info = &relayfs_backing_dev_info; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - switch (mode & S_IFMT) { - case S_IFREG: - inode->i_fop = fops; - if (data) - inode->u.generic_ip = data; - break; - case S_IFDIR: - inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ - inode->i_nlink++; - break; - default: - break; - } - - return inode; -} - -/** - * relayfs_create_entry - create a relayfs directory or file - * @name: the name of the file to create - * @parent: parent directory - * @mode: mode - * @fops: file operations to use for the file - * @data: user-associated data for this file - * - * Returns the new dentry, NULL on failure - * - * Creates a file or directory with the specifed permissions. - */ -static struct dentry *relayfs_create_entry(const char *name, - struct dentry *parent, - int mode, - struct file_operations *fops, - void *data) -{ - struct dentry *d; - struct inode *inode; - int error = 0; - - BUG_ON(!name || !(S_ISREG(mode) || S_ISDIR(mode))); - - error = simple_pin_fs("relayfs", &relayfs_mount, &relayfs_mount_count); - if (error) { - printk(KERN_ERR "Couldn't mount relayfs: errcode %d\n", error); - return NULL; - } - - if (!parent && relayfs_mount && relayfs_mount->mnt_sb) - parent = relayfs_mount->mnt_sb->s_root; - - if (!parent) { - simple_release_fs(&relayfs_mount, &relayfs_mount_count); - return NULL; - } - - parent = dget(parent); - mutex_lock(&parent->d_inode->i_mutex); - d = lookup_one_len(name, parent, strlen(name)); - if (IS_ERR(d)) { - d = NULL; - goto release_mount; - } - - if (d->d_inode) { - d = NULL; - goto release_mount; - } - - inode = relayfs_get_inode(parent->d_inode->i_sb, mode, fops, data); - if (!inode) { - d = NULL; - goto release_mount; - } - - d_instantiate(d, inode); - dget(d); /* Extra count - pin the dentry in core */ - - if (S_ISDIR(mode)) - parent->d_inode->i_nlink++; - - goto exit; - -release_mount: - simple_release_fs(&relayfs_mount, &relayfs_mount_count); - -exit: - mutex_unlock(&parent->d_inode->i_mutex); - dput(parent); - return d; -} - -/** - * relayfs_create_file - create a file in the relay filesystem - * @name: the name of the file to create - * @parent: parent directory - * @mode: mode, if not specied the default perms are used - * @fops: file operations to use for the file - * @data: user-associated data for this file - * - * Returns file dentry if successful, NULL otherwise. - * - * The file will be created user r on behalf of current user. - */ -struct dentry *relayfs_create_file(const char *name, - struct dentry *parent, - int mode, - struct file_operations *fops, - void *data) -{ - BUG_ON(!fops); - - if (!mode) - mode = S_IRUSR; - mode = (mode & S_IALLUGO) | S_IFREG; - - return relayfs_create_entry(name, parent, mode, fops, data); -} - -/** - * relayfs_create_dir - create a directory in the relay filesystem - * @name: the name of the directory to create - * @parent: parent directory, NULL if parent should be fs root - * - * Returns directory dentry if successful, NULL otherwise. - * - * The directory will be created world rwx on behalf of current user. - */ -struct dentry *relayfs_create_dir(const char *name, struct dentry *parent) -{ - int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; - return relayfs_create_entry(name, parent, mode, NULL, NULL); -} - -/** - * relayfs_remove - remove a file or directory in the relay filesystem - * @dentry: file or directory dentry - * - * Returns 0 if successful, negative otherwise. 
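The deleted relayfs_create_entry() above is the standard libfs recipe for populating an in-kernel pseudo-filesystem: pin the filesystem with simple_pin_fs(), take the parent's i_mutex, look up a negative dentry, attach a fresh inode, and hold an extra dentry reference so the entry stays in core. A condensed sketch of that shape; the function name is invented, and simple_pin_fs() and the error paths are elided:

```c
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/dcache.h>
#include <linux/string.h>

/* Skeleton of the create pattern in the deleted relayfs_create_entry();
 * the caller supplies a ready inode. */
static struct dentry *pseudo_create(const char *name, struct dentry *parent,
				    struct inode *inode)
{
	struct dentry *d;

	parent = dget(parent);			/* hold the parent */
	mutex_lock(&parent->d_inode->i_mutex);
	d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d) && !d->d_inode) {
		d_instantiate(d, inode);	/* bind inode to dentry */
		dget(d);			/* extra ref pins it in core */
	}
	mutex_unlock(&parent->d_inode->i_mutex);
	dput(parent);
	return d;
}
```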
- */ -int relayfs_remove(struct dentry *dentry) -{ - struct dentry *parent; - int error = 0; - - if (!dentry) - return -EINVAL; - parent = dentry->d_parent; - if (!parent) - return -EINVAL; - - parent = dget(parent); - mutex_lock(&parent->d_inode->i_mutex); - if (dentry->d_inode) { - if (S_ISDIR(dentry->d_inode->i_mode)) - error = simple_rmdir(parent->d_inode, dentry); - else - error = simple_unlink(parent->d_inode, dentry); - if (!error) - d_delete(dentry); - } - if (!error) - dput(dentry); - mutex_unlock(&parent->d_inode->i_mutex); - dput(parent); - - if (!error) - simple_release_fs(&relayfs_mount, &relayfs_mount_count); - - return error; -} - -/** - * relayfs_remove_file - remove a file from relay filesystem - * @dentry: directory dentry - * - * Returns 0 if successful, negative otherwise. - */ -int relayfs_remove_file(struct dentry *dentry) -{ - return relayfs_remove(dentry); -} - -/** - * relayfs_remove_dir - remove a directory in the relay filesystem - * @dentry: directory dentry - * - * Returns 0 if successful, negative otherwise. - */ -int relayfs_remove_dir(struct dentry *dentry) -{ - return relayfs_remove(dentry); -} - -/** - * relay_file_open - open file op for relay files - * @inode: the inode - * @filp: the file - * - * Increments the channel buffer refcount. - */ -static int relay_file_open(struct inode *inode, struct file *filp) -{ - struct rchan_buf *buf = inode->u.generic_ip; - kref_get(&buf->kref); - filp->private_data = buf; - - return 0; -} - -/** - * relay_file_mmap - mmap file op for relay files - * @filp: the file - * @vma: the vma describing what to map - * - * Calls upon relay_mmap_buf to map the file into user space. - */ -static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct rchan_buf *buf = filp->private_data; - return relay_mmap_buf(buf, vma); -} - -/** - * relay_file_poll - poll file op for relay files - * @filp: the file - * @wait: poll table - * - * Poll implemention. - */ -static unsigned int relay_file_poll(struct file *filp, poll_table *wait) -{ - unsigned int mask = 0; - struct rchan_buf *buf = filp->private_data; - - if (buf->finalized) - return POLLERR; - - if (filp->f_mode & FMODE_READ) { - poll_wait(filp, &buf->read_wait, wait); - if (!relay_buf_empty(buf)) - mask |= POLLIN | POLLRDNORM; - } - - return mask; -} - -/** - * relay_file_release - release file op for relay files - * @inode: the inode - * @filp: the file - * - * Decrements the channel refcount, as the filesystem is - * no longer using it. 
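relay_file_open() above and relay_file_release() just below bracket the buffer's lifetime with a kref: every open takes a reference, every release drops one, and relay_remove_buf() runs only from the final kref_put(). The general shape of the idiom, with a hypothetical object:

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {				/* hypothetical refcounted object */
	struct kref kref;
	/* ... payload ... */
};

static void obj_release(struct kref *kref)	/* runs only on the last put */
{
	struct obj *o = container_of(kref, struct obj, kref);
	kfree(o);
}

static void obj_open(struct obj *o)
{
	kref_get(&o->kref);			/* one reference per opener */
}

static void obj_close(struct obj *o)
{
	kref_put(&o->kref, obj_release);	/* frees on the final close */
}
```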
- */ -static int relay_file_release(struct inode *inode, struct file *filp) -{ - struct rchan_buf *buf = filp->private_data; - kref_put(&buf->kref, relay_remove_buf); - - return 0; -} - -/** - * relay_file_read_consume - update the consumed count for the buffer - */ -static void relay_file_read_consume(struct rchan_buf *buf, - size_t read_pos, - size_t bytes_consumed) -{ - size_t subbuf_size = buf->chan->subbuf_size; - size_t n_subbufs = buf->chan->n_subbufs; - size_t read_subbuf; - - if (buf->bytes_consumed + bytes_consumed > subbuf_size) { - relay_subbufs_consumed(buf->chan, buf->cpu, 1); - buf->bytes_consumed = 0; - } - - buf->bytes_consumed += bytes_consumed; - read_subbuf = read_pos / buf->chan->subbuf_size; - if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) { - if ((read_subbuf == buf->subbufs_produced % n_subbufs) && - (buf->offset == subbuf_size)) - return; - relay_subbufs_consumed(buf->chan, buf->cpu, 1); - buf->bytes_consumed = 0; - } -} - -/** - * relay_file_read_avail - boolean, are there unconsumed bytes available? - */ -static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) -{ - size_t bytes_produced, bytes_consumed, write_offset; - size_t subbuf_size = buf->chan->subbuf_size; - size_t n_subbufs = buf->chan->n_subbufs; - size_t produced = buf->subbufs_produced % n_subbufs; - size_t consumed = buf->subbufs_consumed % n_subbufs; - - write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset; - - if (consumed > produced) { - if ((produced > n_subbufs) && - (produced + n_subbufs - consumed <= n_subbufs)) - produced += n_subbufs; - } else if (consumed == produced) { - if (buf->offset > subbuf_size) { - produced += n_subbufs; - if (buf->subbufs_produced == buf->subbufs_consumed) - consumed += n_subbufs; - } - } - - if (buf->offset > subbuf_size) - bytes_produced = (produced - 1) * subbuf_size + write_offset; - else - bytes_produced = produced * subbuf_size + write_offset; - bytes_consumed = consumed * subbuf_size + buf->bytes_consumed; - - if (bytes_produced == bytes_consumed) - return 0; - - relay_file_read_consume(buf, read_pos, 0); - - return 1; -} - -/** - * relay_file_read_subbuf_avail - return bytes available in sub-buffer - */ -static size_t relay_file_read_subbuf_avail(size_t read_pos, - struct rchan_buf *buf) -{ - size_t padding, avail = 0; - size_t read_subbuf, read_offset, write_subbuf, write_offset; - size_t subbuf_size = buf->chan->subbuf_size; - - write_subbuf = (buf->data - buf->start) / subbuf_size; - write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset; - read_subbuf = read_pos / subbuf_size; - read_offset = read_pos % subbuf_size; - padding = buf->padding[read_subbuf]; - - if (read_subbuf == write_subbuf) { - if (read_offset + padding < write_offset) - avail = write_offset - (read_offset + padding); - } else - avail = (subbuf_size - padding) - read_offset; - - return avail; -} - -/** - * relay_file_read_start_pos - find the first available byte to read - * - * If the read_pos is in the middle of padding, return the - * position of the first actually available byte, otherwise - * return the original value. 
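The read-side helpers above are all modular arithmetic over a ring of n_subbufs equal-sized sub-buffers, each of which may end in padding that a reader has to hop over. A standalone model of the skip implemented just below in relay_file_read_start_pos(); the sizes and padding[] contents are invented for illustration:

```c
#include <stddef.h>

/* Userspace model of the padding skip in relay_file_read_start_pos(). */
static size_t start_pos(size_t read_pos, size_t subbuf_size,
			size_t n_subbufs, const size_t *padding)
{
	size_t sub = read_pos / subbuf_size;
	size_t pad_start = (sub + 1) * subbuf_size - padding[sub];
	size_t pad_end = (sub + 1) * subbuf_size;

	if (read_pos >= pad_start && read_pos < pad_end) {
		sub = (sub + 1) % n_subbufs;	/* hop to the next sub-buffer */
		read_pos = sub * subbuf_size;
	}
	return read_pos;
}

/*
 * E.g. subbuf_size = 4096, n_subbufs = 4, padding[0] = 100:
 * positions 3996..4095 are padding, so a read there resumes at 4096.
 */
```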
- */ -static size_t relay_file_read_start_pos(size_t read_pos, - struct rchan_buf *buf) -{ - size_t read_subbuf, padding, padding_start, padding_end; - size_t subbuf_size = buf->chan->subbuf_size; - size_t n_subbufs = buf->chan->n_subbufs; - - read_subbuf = read_pos / subbuf_size; - padding = buf->padding[read_subbuf]; - padding_start = (read_subbuf + 1) * subbuf_size - padding; - padding_end = (read_subbuf + 1) * subbuf_size; - if (read_pos >= padding_start && read_pos < padding_end) { - read_subbuf = (read_subbuf + 1) % n_subbufs; - read_pos = read_subbuf * subbuf_size; - } - - return read_pos; -} - -/** - * relay_file_read_end_pos - return the new read position - */ -static size_t relay_file_read_end_pos(struct rchan_buf *buf, - size_t read_pos, - size_t count) -{ - size_t read_subbuf, padding, end_pos; - size_t subbuf_size = buf->chan->subbuf_size; - size_t n_subbufs = buf->chan->n_subbufs; - - read_subbuf = read_pos / subbuf_size; - padding = buf->padding[read_subbuf]; - if (read_pos % subbuf_size + count + padding == subbuf_size) - end_pos = (read_subbuf + 1) * subbuf_size; - else - end_pos = read_pos + count; - if (end_pos >= subbuf_size * n_subbufs) - end_pos = 0; - - return end_pos; -} - -/** - * relay_file_read - read file op for relay files - * @filp: the file - * @buffer: the userspace buffer - * @count: number of bytes to read - * @ppos: position to read from - * - * Reads count bytes or the number of bytes available in the - * current sub-buffer being read, whichever is smaller. - */ -static ssize_t relay_file_read(struct file *filp, - char __user *buffer, - size_t count, - loff_t *ppos) -{ - struct rchan_buf *buf = filp->private_data; - struct inode *inode = filp->f_dentry->d_inode; - size_t read_start, avail; - ssize_t ret = 0; - void *from; - - mutex_lock(&inode->i_mutex); - if(!relay_file_read_avail(buf, *ppos)) - goto out; - - read_start = relay_file_read_start_pos(*ppos, buf); - avail = relay_file_read_subbuf_avail(read_start, buf); - if (!avail) - goto out; - - from = buf->start + read_start; - ret = count = min(count, avail); - if (copy_to_user(buffer, from, count)) { - ret = -EFAULT; - goto out; - } - relay_file_read_consume(buf, read_start, count); - *ppos = relay_file_read_end_pos(buf, read_start, count); -out: - mutex_unlock(&inode->i_mutex); - return ret; -} - -struct file_operations relay_file_operations = { - .open = relay_file_open, - .poll = relay_file_poll, - .mmap = relay_file_mmap, - .read = relay_file_read, - .llseek = no_llseek, - .release = relay_file_release, -}; - -static struct super_operations relayfs_ops = { - .statfs = simple_statfs, - .drop_inode = generic_delete_inode, -}; - -static int relayfs_fill_super(struct super_block * sb, void * data, int silent) -{ - struct inode *inode; - struct dentry *root; - int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; - - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = RELAYFS_MAGIC; - sb->s_op = &relayfs_ops; - inode = relayfs_get_inode(sb, mode, NULL, NULL); - - if (!inode) - return -ENOMEM; - - root = d_alloc_root(inode); - if (!root) { - iput(inode); - return -ENOMEM; - } - sb->s_root = root; - - return 0; -} - -static struct super_block * relayfs_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) -{ - return get_sb_single(fs_type, flags, data, relayfs_fill_super); -} - -static struct file_system_type relayfs_fs_type = { - .owner = THIS_MODULE, - .name = "relayfs", - .get_sb = relayfs_get_sb, - .kill_sb = 
kill_litter_super, -}; - -static int __init init_relayfs_fs(void) -{ - return register_filesystem(&relayfs_fs_type); -} - -static void __exit exit_relayfs_fs(void) -{ - - - - - - unregister_filesystem(&relayfs_fs_type); -} - -module_init(init_relayfs_fs) -module_exit(exit_relayfs_fs) - -EXPORT_SYMBOL_GPL(relay_file_operations); -EXPORT_SYMBOL_GPL(relayfs_create_dir); -EXPORT_SYMBOL_GPL(relayfs_remove_dir); -EXPORT_SYMBOL_GPL(relayfs_create_file); -EXPORT_SYMBOL_GPL(relayfs_remove_file); - -MODULE_AUTHOR("Tom Zanussi <zanussi@us.ibm.com> and Karim Yaghmour <karim@opersys.com>"); -MODULE_DESCRIPTION("Relay Filesystem"); -MODULE_LICENSE("GPL"); - diff --git a/fs/relayfs/relay.c b/fs/relayfs/relay.c deleted file mode 100644 index abf3ceaace49..000000000000 --- a/fs/relayfs/relay.c +++ /dev/null @@ -1,482 +0,0 @@ -/* - * Public API and common code for RelayFS. - * - * See Documentation/filesystems/relayfs.txt for an overview of relayfs. - * - * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp - * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) - * - * This file is released under the GPL. - */ - -#include <linux/errno.h> -#include <linux/stddef.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/relayfs_fs.h> -#include "relay.h" -#include "buffers.h" - -/** - * relay_buf_empty - boolean, is the channel buffer empty? - * @buf: channel buffer - * - * Returns 1 if the buffer is empty, 0 otherwise. - */ -int relay_buf_empty(struct rchan_buf *buf) -{ - return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1; -} - -/** - * relay_buf_full - boolean, is the channel buffer full? - * @buf: channel buffer - * - * Returns 1 if the buffer is full, 0 otherwise. - */ -int relay_buf_full(struct rchan_buf *buf) -{ - size_t ready = buf->subbufs_produced - buf->subbufs_consumed; - return (ready >= buf->chan->n_subbufs) ? 1 : 0; -} - -/* - * High-level relayfs kernel API and associated functions. - */ - -/* - * rchan_callback implementations defining default channel behavior. Used - * in place of corresponding NULL values in client callback struct. - */ - -/* - * subbuf_start() default callback. Does nothing. - */ -static int subbuf_start_default_callback (struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - size_t prev_padding) -{ - if (relay_buf_full(buf)) - return 0; - - return 1; -} - -/* - * buf_mapped() default callback. Does nothing. - */ -static void buf_mapped_default_callback(struct rchan_buf *buf, - struct file *filp) -{ -} - -/* - * buf_unmapped() default callback. Does nothing. - */ -static void buf_unmapped_default_callback(struct rchan_buf *buf, - struct file *filp) -{ -} - -/* - * create_buf_file_create() default callback. Creates file to represent buf. - */ -static struct dentry *create_buf_file_default_callback(const char *filename, - struct dentry *parent, - int mode, - struct rchan_buf *buf, - int *is_global) -{ - return relayfs_create_file(filename, parent, mode, - &relay_file_operations, buf); -} - -/* - * remove_buf_file() default callback. Removes file representing relay buffer. 
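The tail of the deleted fs/relayfs/inode.c is the registration boilerplate every pseudo-filesystem of this vintage carries: a file_system_type wired to get_sb_single(), registered on module init and unregistered on exit. Reduced to its essentials; the filesystem name and fill_super helper are invented, and the 2.6.16-era get_sb hook returns a struct super_block * as in the hunk:

```c
#include <linux/fs.h>
#include <linux/module.h>

/* examplefs_fill_super() is an assumed helper that populates the sb. */
extern int examplefs_fill_super(struct super_block *sb, void *data, int silent);

static struct super_block *examplefs_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return get_sb_single(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.get_sb  = examplefs_get_sb,
	.kill_sb = kill_litter_super,
};

static int __init examplefs_init(void)
{
	return register_filesystem(&examplefs_fs_type);
}

static void __exit examplefs_exit(void)
{
	unregister_filesystem(&examplefs_fs_type);
}

module_init(examplefs_init);
module_exit(examplefs_exit);
MODULE_LICENSE("GPL");
```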
- */ -static int remove_buf_file_default_callback(struct dentry *dentry) -{ - return relayfs_remove(dentry); -} - -/* relay channel default callbacks */ -static struct rchan_callbacks default_channel_callbacks = { - .subbuf_start = subbuf_start_default_callback, - .buf_mapped = buf_mapped_default_callback, - .buf_unmapped = buf_unmapped_default_callback, - .create_buf_file = create_buf_file_default_callback, - .remove_buf_file = remove_buf_file_default_callback, -}; - -/** - * wakeup_readers - wake up readers waiting on a channel - * @private: the channel buffer - * - * This is the work function used to defer reader waking. The - * reason waking is deferred is that calling directly from write - * causes problems if you're writing from say the scheduler. - */ -static void wakeup_readers(void *private) -{ - struct rchan_buf *buf = private; - wake_up_interruptible(&buf->read_wait); -} - -/** - * __relay_reset - reset a channel buffer - * @buf: the channel buffer - * @init: 1 if this is a first-time initialization - * - * See relay_reset for description of effect. - */ -static inline void __relay_reset(struct rchan_buf *buf, unsigned int init) -{ - size_t i; - - if (init) { - init_waitqueue_head(&buf->read_wait); - kref_init(&buf->kref); - INIT_WORK(&buf->wake_readers, NULL, NULL); - } else { - cancel_delayed_work(&buf->wake_readers); - flush_scheduled_work(); - } - - buf->subbufs_produced = 0; - buf->subbufs_consumed = 0; - buf->bytes_consumed = 0; - buf->finalized = 0; - buf->data = buf->start; - buf->offset = 0; - - for (i = 0; i < buf->chan->n_subbufs; i++) - buf->padding[i] = 0; - - buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0); -} - -/** - * relay_reset - reset the channel - * @chan: the channel - * - * This has the effect of erasing all data from all channel buffers - * and restarting the channel in its initial state. The buffers - * are not freed, so any mappings are still in effect. - * - * NOTE: Care should be taken that the channel isn't actually - * being used by anything when this call is made. - */ -void relay_reset(struct rchan *chan) -{ - unsigned int i; - struct rchan_buf *prev = NULL; - - if (!chan) - return; - - for (i = 0; i < NR_CPUS; i++) { - if (!chan->buf[i] || chan->buf[i] == prev) - break; - __relay_reset(chan->buf[i], 0); - prev = chan->buf[i]; - } -} - -/** - * relay_open_buf - create a new channel buffer in relayfs - * - * Internal - used by relay_open(). - */ -static struct rchan_buf *relay_open_buf(struct rchan *chan, - const char *filename, - struct dentry *parent, - int *is_global) -{ - struct rchan_buf *buf; - struct dentry *dentry; - - if (*is_global) - return chan->buf[0]; - - buf = relay_create_buf(chan); - if (!buf) - return NULL; - - /* Create file in fs */ - dentry = chan->cb->create_buf_file(filename, parent, S_IRUSR, - buf, is_global); - if (!dentry) { - relay_destroy_buf(buf); - return NULL; - } - - buf->dentry = dentry; - __relay_reset(buf, 1); - - return buf; -} - -/** - * relay_close_buf - close a channel buffer - * @buf: channel buffer - * - * Marks the buffer finalized and restores the default callbacks. - * The channel buffer and channel buffer data structure are then freed - * automatically when the last reference is given up. 
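wakeup_readers() above exists because relay writes may happen in contexts where waking a waitqueue directly would be unsafe (the comment names writing from the scheduler), so the wakeup is deferred through the shared workqueue. A sketch using the three-argument INIT_WORK() of this kernel generation, as seen in the hunk; the buffer struct is hypothetical:

```c
#include <linux/workqueue.h>
#include <linux/wait.h>

struct buf {				/* hypothetical buffer */
	wait_queue_head_t read_wait;
	struct work_struct wake_readers;
};

static void wakeup_readers(void *private)	/* runs in process context */
{
	struct buf *b = private;
	wake_up_interruptible(&b->read_wait);
}

static void buf_init(struct buf *b)
{
	init_waitqueue_head(&b->read_wait);
	INIT_WORK(&b->wake_readers, wakeup_readers, b);
}

/* Called from the write path, possibly from an unsafe context: */
static void buf_poke_readers(struct buf *b)
{
	if (waitqueue_active(&b->read_wait))
		schedule_delayed_work(&b->wake_readers, 1);	/* one tick later */
}
```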
- */ -static inline void relay_close_buf(struct rchan_buf *buf) -{ - buf->finalized = 1; - buf->chan->cb = &default_channel_callbacks; - cancel_delayed_work(&buf->wake_readers); - flush_scheduled_work(); - kref_put(&buf->kref, relay_remove_buf); -} - -static inline void setup_callbacks(struct rchan *chan, - struct rchan_callbacks *cb) -{ - if (!cb) { - chan->cb = &default_channel_callbacks; - return; - } - - if (!cb->subbuf_start) - cb->subbuf_start = subbuf_start_default_callback; - if (!cb->buf_mapped) - cb->buf_mapped = buf_mapped_default_callback; - if (!cb->buf_unmapped) - cb->buf_unmapped = buf_unmapped_default_callback; - if (!cb->create_buf_file) - cb->create_buf_file = create_buf_file_default_callback; - if (!cb->remove_buf_file) - cb->remove_buf_file = remove_buf_file_default_callback; - chan->cb = cb; -} - -/** - * relay_open - create a new relayfs channel - * @base_filename: base name of files to create - * @parent: dentry of parent directory, NULL for root directory - * @subbuf_size: size of sub-buffers - * @n_subbufs: number of sub-buffers - * @cb: client callback functions - * - * Returns channel pointer if successful, NULL otherwise. - * - * Creates a channel buffer for each cpu using the sizes and - * attributes specified. The created channel buffer files - * will be named base_filename0...base_filenameN-1. File - * permissions will be S_IRUSR. - */ -struct rchan *relay_open(const char *base_filename, - struct dentry *parent, - size_t subbuf_size, - size_t n_subbufs, - struct rchan_callbacks *cb) -{ - unsigned int i; - struct rchan *chan; - char *tmpname; - int is_global = 0; - - if (!base_filename) - return NULL; - - if (!(subbuf_size && n_subbufs)) - return NULL; - - chan = kcalloc(1, sizeof(struct rchan), GFP_KERNEL); - if (!chan) - return NULL; - - chan->version = RELAYFS_CHANNEL_VERSION; - chan->n_subbufs = n_subbufs; - chan->subbuf_size = subbuf_size; - chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); - setup_callbacks(chan, cb); - kref_init(&chan->kref); - - tmpname = kmalloc(NAME_MAX + 1, GFP_KERNEL); - if (!tmpname) - goto free_chan; - - for_each_online_cpu(i) { - sprintf(tmpname, "%s%d", base_filename, i); - chan->buf[i] = relay_open_buf(chan, tmpname, parent, - &is_global); - chan->buf[i]->cpu = i; - if (!chan->buf[i]) - goto free_bufs; - } - - kfree(tmpname); - return chan; - -free_bufs: - for (i = 0; i < NR_CPUS; i++) { - if (!chan->buf[i]) - break; - relay_close_buf(chan->buf[i]); - if (is_global) - break; - } - kfree(tmpname); - -free_chan: - kref_put(&chan->kref, relay_destroy_channel); - return NULL; -} - -/** - * relay_switch_subbuf - switch to a new sub-buffer - * @buf: channel buffer - * @length: size of current event - * - * Returns either the length passed in or 0 if full. - - * Performs sub-buffer-switch tasks such as invoking callbacks, - * updating padding counts, waking up readers, etc. 
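setup_callbacks() above is a common ops-table idiom: accept a client-supplied callback struct, patch every NULL slot with a default, and from then on call through the table unconditionally. In miniature, with all names hypothetical:

```c
struct ops {				/* hypothetical ops table */
	int  (*start)(void *ctx);
	void (*stop)(void *ctx);
};

static int  default_start(void *ctx) { return 1; }
static void default_stop(void *ctx)  { }

static struct ops default_ops = {
	.start = default_start,
	.stop  = default_stop,
};

static void setup_ops(struct ops **dst, struct ops *user)
{
	if (!user) {			/* no table at all: use defaults */
		*dst = &default_ops;
		return;
	}
	if (!user->start)
		user->start = default_start;
	if (!user->stop)
		user->stop = default_stop;
	*dst = user;			/* safe to call unconditionally now */
}
```

Note in passing that the deleted relay_open() above writes chan->buf[i]->cpu = i before the NULL check on chan->buf[i], so that check could never catch a failed allocation in time.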
- */ -size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) -{ - void *old, *new; - size_t old_subbuf, new_subbuf; - - if (unlikely(length > buf->chan->subbuf_size)) - goto toobig; - - if (buf->offset != buf->chan->subbuf_size + 1) { - buf->prev_padding = buf->chan->subbuf_size - buf->offset; - old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; - buf->padding[old_subbuf] = buf->prev_padding; - buf->subbufs_produced++; - if (waitqueue_active(&buf->read_wait)) { - PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); - schedule_delayed_work(&buf->wake_readers, 1); - } - } - - old = buf->data; - new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; - new = buf->start + new_subbuf * buf->chan->subbuf_size; - buf->offset = 0; - if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) { - buf->offset = buf->chan->subbuf_size + 1; - return 0; - } - buf->data = new; - buf->padding[new_subbuf] = 0; - - if (unlikely(length + buf->offset > buf->chan->subbuf_size)) - goto toobig; - - return length; - -toobig: - buf->chan->last_toobig = length; - return 0; -} - -/** - * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count - * @chan: the channel - * @cpu: the cpu associated with the channel buffer to update - * @subbufs_consumed: number of sub-buffers to add to current buf's count - * - * Adds to the channel buffer's consumed sub-buffer count. - * subbufs_consumed should be the number of sub-buffers newly consumed, - * not the total consumed. - * - * NOTE: kernel clients don't need to call this function if the channel - * mode is 'overwrite'. - */ -void relay_subbufs_consumed(struct rchan *chan, - unsigned int cpu, - size_t subbufs_consumed) -{ - struct rchan_buf *buf; - - if (!chan) - return; - - if (cpu >= NR_CPUS || !chan->buf[cpu]) - return; - - buf = chan->buf[cpu]; - buf->subbufs_consumed += subbufs_consumed; - if (buf->subbufs_consumed > buf->subbufs_produced) - buf->subbufs_consumed = buf->subbufs_produced; -} - -/** - * relay_destroy_channel - free the channel struct - * - * Should only be called from kref_put(). - */ -void relay_destroy_channel(struct kref *kref) -{ - struct rchan *chan = container_of(kref, struct rchan, kref); - kfree(chan); -} - -/** - * relay_close - close the channel - * @chan: the channel - * - * Closes all channel buffers and frees the channel. - */ -void relay_close(struct rchan *chan) -{ - unsigned int i; - struct rchan_buf *prev = NULL; - - if (!chan) - return; - - for (i = 0; i < NR_CPUS; i++) { - if (!chan->buf[i] || chan->buf[i] == prev) - break; - relay_close_buf(chan->buf[i]); - prev = chan->buf[i]; - } - - if (chan->last_toobig) - printk(KERN_WARNING "relayfs: one or more items not logged " - "[item size (%Zd) > sub-buffer size (%Zd)]\n", - chan->last_toobig, chan->subbuf_size); - - kref_put(&chan->kref, relay_destroy_channel); -} - -/** - * relay_flush - close the channel - * @chan: the channel - * - * Flushes all channel buffers i.e. forces buffer switch. 
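relay_switch_subbuf() above is the core of the write path: when an event will not fit, the unused tail of the current sub-buffer is recorded as padding and writing resumes at the next sub-buffer in the ring, wrapping modulo n_subbufs. The index arithmetic extracted into a standalone model; the struct and its fields are illustrative, not the kernel types:

```c
#include <stddef.h>

struct ring {
	char  *start;		/* base of the mapped buffer   */
	size_t subbuf_size;	/* bytes per sub-buffer        */
	size_t n_subbufs;	/* sub-buffers in the ring     */
	size_t produced;	/* sub-buffers filled so far   */
};

static char *switch_subbuf(struct ring *r, size_t offset, size_t *padding)
{
	size_t old = r->produced % r->n_subbufs;

	padding[old] = r->subbuf_size - offset;	/* unused tail becomes padding */
	r->produced++;
	/* next write position; wraps and overwrites in overwrite mode */
	return r->start + (r->produced % r->n_subbufs) * r->subbuf_size;
}
```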
- */ -void relay_flush(struct rchan *chan) -{ - unsigned int i; - struct rchan_buf *prev = NULL; - - if (!chan) - return; - - for (i = 0; i < NR_CPUS; i++) { - if (!chan->buf[i] || chan->buf[i] == prev) - break; - relay_switch_subbuf(chan->buf[i], 0); - prev = chan->buf[i]; - } -} - -EXPORT_SYMBOL_GPL(relay_open); -EXPORT_SYMBOL_GPL(relay_close); -EXPORT_SYMBOL_GPL(relay_flush); -EXPORT_SYMBOL_GPL(relay_reset); -EXPORT_SYMBOL_GPL(relay_subbufs_consumed); -EXPORT_SYMBOL_GPL(relay_switch_subbuf); -EXPORT_SYMBOL_GPL(relay_buf_full); diff --git a/fs/relayfs/relay.h b/fs/relayfs/relay.h deleted file mode 100644 index 0993d3e5753b..000000000000 --- a/fs/relayfs/relay.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _RELAY_H -#define _RELAY_H - -extern int relayfs_remove(struct dentry *dentry); -extern int relay_buf_empty(struct rchan_buf *buf); -extern void relay_destroy_channel(struct kref *kref); - -#endif /* _RELAY_H */ diff --git a/fs/seq_file.c b/fs/seq_file.c index 7c40570b71dc..555b9ac04c25 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op) file->private_data = p; } memset(p, 0, sizeof(*p)); - sema_init(&p->sem, 1); + mutex_init(&p->lock); p->op = op; /* @@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) void *p; int err = 0; - down(&m->sem); + mutex_lock(&m->lock); /* * seq_file->op->..m_start/m_stop/m_next may do special actions * or optimisations based on the file->f_version, so we want to @@ -164,7 +164,7 @@ Done: else *ppos += copied; file->f_version = m->version; - up(&m->sem); + mutex_unlock(&m->lock); return copied; Enomem: err = -ENOMEM; @@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) struct seq_file *m = (struct seq_file *)file->private_data; long long retval = -EINVAL; - down(&m->sem); + mutex_lock(&m->lock); m->version = file->f_version; switch (origin) { case 1: @@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) } } } - up(&m->sem); + mutex_unlock(&m->lock); file->f_version = m->version; return retval; } diff --git a/fs/super.c b/fs/super.c index e20b5580afd5..425861cb1caa 100644 --- a/fs/super.c +++ b/fs/super.c @@ -76,9 +76,9 @@ static struct super_block *alloc_super(void) down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); - sema_init(&s->s_vfs_rename_sem,1); - sema_init(&s->s_dquot.dqio_sem, 1); - sema_init(&s->s_dquot.dqonoff_sem, 1); + mutex_init(&s->s_vfs_rename_mutex); + mutex_init(&s->s_dquot.dqio_mutex); + mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; @@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, * will protect the lockfs code from trying to start a snapshot * while we are mounting */ - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); if (IS_ERR(s)) goto out; diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 49bd219275db..9ee956864445 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -50,6 +50,32 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd, return sd; } +/** + * + * Return -EEXIST if there is already a sysfs element with the same name for + * the same parent. 
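sysfs_dirent_exist(), whose kerneldoc starts above and whose body follows, is a plain linear scan of the parent's children done under the parent's i_mutex; create_dir(), sysfs_add_file() and sysfs_create_link() in the following hunks consult it so that duplicate names now fail with -EEXIST instead of quietly creating a second dirent. The scan in isolation, with structure names loosely modeled on sysfs_dirent:

```c
#include <linux/list.h>
#include <linux/string.h>
#include <linux/errno.h>

struct dirent_node {			/* loosely modeled on sysfs_dirent */
	const char *name;
	struct list_head sibling;
};

/* Caller must hold the parent directory's i_mutex. */
static int name_exists(struct list_head *children, const char *new)
{
	struct dirent_node *d;

	list_for_each_entry(d, children, sibling)
		if (!strcmp(d->name, new))
			return -EEXIST;
	return 0;
}
```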
+ * + * called with parent inode's i_mutex held + */ +int sysfs_dirent_exist(struct sysfs_dirent *parent_sd, + const unsigned char *new) +{ + struct sysfs_dirent * sd; + + list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { + if (sd->s_element) { + const unsigned char *existing = sysfs_get_name(sd); + if (strcmp(existing, new)) + continue; + else + return -EEXIST; + } + } + + return 0; +} + + int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry, void * element, umode_t mode, int type) { @@ -102,7 +128,11 @@ static int create_dir(struct kobject * k, struct dentry * p, mutex_lock(&p->d_inode->i_mutex); *d = lookup_one_len(n, p, strlen(n)); if (!IS_ERR(*d)) { - error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, SYSFS_DIR); + if (sysfs_dirent_exist(p->d_fsdata, n)) + error = -EEXIST; + else + error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, + SYSFS_DIR); if (!error) { error = sysfs_create(*d, mode, init_dir); if (!error) { @@ -302,6 +332,7 @@ void sysfs_remove_dir(struct kobject * kobj) * Drop reference from dget() on entrance. */ dput(dentry); + kobj->dentry = NULL; } int sysfs_rename_dir(struct kobject * kobj, const char *new_name) @@ -479,7 +510,3 @@ struct file_operations sysfs_dir_operations = { .read = generic_read_dir, .readdir = sysfs_readdir, }; - -EXPORT_SYMBOL_GPL(sysfs_create_dir); -EXPORT_SYMBOL_GPL(sysfs_remove_dir); -EXPORT_SYMBOL_GPL(sysfs_rename_dir); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index d0e3d8495165..5e83e7246788 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -301,9 +301,8 @@ static int check_perm(struct inode * inode, struct file * file) /* No error? Great, allocate a buffer for the file, and store it * it in file->private_data for easy access. */ - buffer = kmalloc(sizeof(struct sysfs_buffer),GFP_KERNEL); + buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL); if (buffer) { - memset(buffer,0,sizeof(struct sysfs_buffer)); init_MUTEX(&buffer->sem); buffer->needs_read_fill = 1; buffer->ops = ops; @@ -362,10 +361,12 @@ int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type) { struct sysfs_dirent * parent_sd = dir->d_fsdata; umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG; - int error = 0; + int error = -EEXIST; mutex_lock(&dir->d_inode->i_mutex); - error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type); + if (!sysfs_dirent_exist(parent_sd, attr->name)) + error = sysfs_make_dirent(parent_sd, NULL, (void *)attr, + mode, type); mutex_unlock(&dir->d_inode->i_mutex); return error; diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 689f7bcfaf30..4c29ac41ac3e 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -54,11 +54,10 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) if (!sd_iattr) { /* setting attributes for the first time, allocate now */ - sd_iattr = kmalloc(sizeof(struct iattr), GFP_KERNEL); + sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL); if (!sd_iattr) return -ENOMEM; /* assign default attributes */ - memset(sd_iattr, 0, sizeof(struct iattr)); sd_iattr->ia_mode = sd->s_mode; sd_iattr->ia_uid = 0; sd_iattr->ia_gid = 0; @@ -227,12 +226,16 @@ void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent) void sysfs_hash_and_remove(struct dentry * dir, const char * name) { struct sysfs_dirent * sd; - struct sysfs_dirent * parent_sd = dir->d_fsdata; + struct sysfs_dirent * parent_sd; + + if (!dir) + return; if (dir->d_inode == NULL) /* no inode means this hasn't been made visible yet */ return; + parent_sd = dir->d_fsdata; 
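Two hunks above (check_perm() and sysfs_setattr()) make the same substitution: kmalloc() followed by memset(..., 0, ...) collapses into kzalloc(), which hands back zeroed memory in one call. The pattern, with a hypothetical struct:

```c
#include <linux/slab.h>

struct thing {				/* hypothetical */
	int a;
	char b[16];
};

static struct thing *thing_alloc(void)
{
	/* one call replaces kmalloc() + memset(..., 0, ...) */
	return kzalloc(sizeof(struct thing), GFP_KERNEL);
}
```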
mutex_lock(&dir->d_inode->i_mutex); list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { if (!sd->s_element) diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c index e38d6338a20d..d2eac3ceed5f 100644 --- a/fs/sysfs/symlink.c +++ b/fs/sysfs/symlink.c @@ -66,6 +66,7 @@ static int sysfs_add_link(struct dentry * parent, const char * name, struct kobj if (!error) return 0; + kobject_put(target); kfree(sl->link_name); exit2: kfree(sl); @@ -82,12 +83,13 @@ exit1: int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name) { struct dentry * dentry = kobj->dentry; - int error = 0; + int error = -EEXIST; BUG_ON(!kobj || !kobj->dentry || !name); mutex_lock(&dentry->d_inode->i_mutex); - error = sysfs_add_link(dentry, name, target); + if (!sysfs_dirent_exist(dentry->d_fsdata, name)) + error = sysfs_add_link(dentry, name, target); mutex_unlock(&dentry->d_inode->i_mutex); return error; } diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 3f8953e0e5d0..cf11d5b789d9 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -5,6 +5,7 @@ extern kmem_cache_t *sysfs_dir_cachep; extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); +extern int sysfs_dirent_exist(struct sysfs_dirent *, const unsigned char *); extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *, umode_t, int); diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 7f0e4b53085e..b8a73f716fbe 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -16,18 +16,6 @@ #include <linux/smp_lock.h> #include "sysv.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = sysv_add_link(dentry, inode); @@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -124,7 +112,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); @@ -151,7 +139,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) goto out; - inc_count(dir); + inode_inc_link_count(dir); inode = sysv_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) sysv_set_inode(inode, 0); - inc_count(inode); + inode_inc_link_count(inode); err = sysv_make_empty(inode, dir); if (err) @@ -175,11 +163,11 @@ out: return err; out_fail: - dec_count(inode); - dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry) goto out; inode->i_ctime = dir->i_ctime; - dec_count(inode); + inode_dec_link_count(inode); out: return err; } @@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) err = 
sysv_unlink(dir, dentry); if (!err) { inode->i_size = 0; - dec_count(inode); - dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } return err; @@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, new_de = sysv_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inc_count(old_inode); + inode_inc_link_count(old_inode); sysv_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) goto out_dir; } - inc_count(old_inode); + inode_inc_link_count(old_inode); err = sysv_add_link(new_dentry, old_inode); if (err) { - dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - inc_count(new_dir); + inode_inc_link_count(new_dir); } sysv_delete_entry(old_de, old_page); - dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { sysv_set_link(dir_de, dir_page, new_dir); - dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 201049ac8a96..ea521f846d97 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb, int bitmap_nr; unsigned long overflow; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { @@ -211,7 +211,7 @@ error_return: sb->s_dirt = 1; if (UDF_SB_LVIDBH(sb)) mark_buffer_dirty(UDF_SB_LVIDBH(sb)); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return; } @@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, int nr_groups, bitmap_nr; struct buffer_head *bh; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) goto out; @@ -275,7 +275,7 @@ out: mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } @@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb, int newblock = 0; *err = -ENOSPC; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); repeat: if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) @@ -364,7 +364,7 @@ repeat: } if (i >= (nr_groups*2)) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return newblock; } if (bit < sb->s_blocksize << 3) @@ -373,7 +373,7 @@ repeat: bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit >= sb->s_blocksize << 3) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -387,7 +387,7 @@ got_block: */ if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; } @@ -410,13 +410,13 @@ got_block: mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; error_return: *err = -EIO; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb, int8_t etype; int i; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, 
bloc.partitionReferenceNum)) { @@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb, error_return: sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return; } @@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, else return 0; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); extoffset = sizeof(struct unallocSpaceEntry); bloc = UDF_I_LOCATION(table); @@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, mark_buffer_dirty(UDF_SB_LVIDBH(sb)); sb->s_dirt = 1; } - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } @@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb, else return newblock; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) goal = 0; @@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb, if (spread == 0xFFFFFFFF) { udf_release_data(goal_bh); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb, if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { udf_release_data(goal_bh); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; } @@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb, } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; } diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index c9b707b470ca..3873c672cb4c 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode) clear_inode(inode); - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (sbi->s_lvidbh) { if (S_ISDIR(inode->i_mode)) UDF_SB_LVIDIU(sb)->numDirs = @@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode) mark_buffer_dirty(sbi->s_lvidbh); } - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); } @@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) return NULL; } - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); UDF_I_UNIQUE(inode) = 0; UDF_I_LENEXTENTS(inode) = 0; UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; @@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); insert_inode_hash(inode); mark_inode_dirty(inode); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); if (DQUOT_ALLOC_INODE(inode)) { diff --git a/fs/udf/super.c b/fs/udf/super.c index 368d8f81fe54..9303c50c5d55 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) sb->s_fs_info = sbi; memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); - init_MUTEX(&sbi->s_alloc_sem); + mutex_init(&sbi->s_alloc_mutex); if (!udf_parse_options((char *)options, &uopt)) goto error_out; diff --git a/fs/ufs/file.c b/fs/ufs/file.c index ed69d7fe1b5d..62ad481810ef 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -23,18 +23,8 @@ * ext2 fs regular file handling primitives */ -#include <asm/uaccess.h> -#include <asm/system.h> - -#include <linux/errno.h> #include <linux/fs.h> #include <linux/ufs_fs.h> -#include <linux/fcntl.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/mm.h> -#include <linux/pagemap.h> -#include <linux/smp_lock.h> /* * We have mostly 
NULL's here: the current defaults are ok for diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 2958cde7d3d6..8d5f98a01c74 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -43,18 +43,6 @@ #define UFSD(x) #endif -static inline void ufs_inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void ufs_dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ufs_add_link(dentry, inode); @@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - ufs_dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -173,7 +161,7 @@ out: return err; out_fail: - ufs_dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, } inode->i_ctime = CURRENT_TIME_SEC; - ufs_inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); error = ufs_add_nondir(dentry, inode); @@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) goto out; lock_kernel(); - ufs_inc_count(dir); + inode_inc_link_count(dir); inode = ufs_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) inode->i_op = &ufs_dir_inode_operations; inode->i_fop = &ufs_dir_operations; - ufs_inc_count(inode); + inode_inc_link_count(inode); err = ufs_make_empty(inode, dir); if (err) @@ -234,11 +222,11 @@ out: return err; out_fail: - ufs_dec_count(inode); - ufs_dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput (inode); out_dir: - ufs_dec_count(dir); + inode_dec_link_count(dir); unlock_kernel(); goto out; } @@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - ufs_dec_count(inode); + inode_dec_link_count(inode); err = 0; out: unlock_kernel(); @@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) err = ufs_unlink(dir, dentry); if (!err) { inode->i_size = 0; - ufs_dec_count(inode); - ufs_dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } unlock_kernel(); @@ -319,35 +307,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ufs_find_entry (new_dentry, &new_bh); if (!new_de) goto out_dir; - ufs_inc_count(old_inode); + inode_inc_link_count(old_inode); ufs_set_link(new_dir, new_de, new_bh, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - ufs_dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= UFS_LINK_MAX) goto out_dir; } - ufs_inc_count(old_inode); + inode_inc_link_count(old_inode); err = ufs_add_link(new_dentry, old_inode); if (err) { - ufs_dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - ufs_inc_count(new_dir); + inode_inc_link_count(new_dir); } ufs_delete_entry (old_dir, old_de, old_bh); - ufs_dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { ufs_set_link(old_inode, dir_de, dir_bh, new_dir); - ufs_dec_count(old_dir); + inode_dec_link_count(old_dir); } unlock_kernel(); return 0; diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6 index 97bd4743b461..5d73eaa1971f 100644 --- 
a/fs/xfs/Makefile-linux-2.6 +++ b/fs/xfs/Makefile-linux-2.6 @@ -1,33 +1,19 @@ # -# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. +# Copyright (c) 2000-2005 Silicon Graphics, Inc. +# All Rights Reserved. # -# This program is free software; you can redistribute it and/or modify it -# under the terms of version 2 of the GNU General Public License as +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as # published by the Free Software Foundation. # -# This program is distributed in the hope that it would be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# This program is distributed in the hope that it would be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# Further, this software is distributed without any warranty that it is -# free of the rightful claim of any third person regarding infringement -# or the like. Any license provided herein, whether implied or -# otherwise, applies only to this software file. Patent licenses, if -# any, provided herein do not apply to combinations of this program with -# other software, or any other product whatsoever. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write the Free Software Foundation, Inc., 59 -# Temple Place - Suite 330, Boston MA 02111-1307, USA. -# -# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -# Mountain View, CA 94043, or: -# -# http://www.sgi.com -# -# For further information regarding this notice, see: -# -# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char @@ -36,7 +22,7 @@ XFS_LINUX := linux-2.6 ifeq ($(CONFIG_XFS_DEBUG),y) EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG - EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING + EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING endif ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ALLOC_TRACE @@ -50,7 +36,7 @@ ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ILOCK_TRACE EXTRA_CFLAGS += -DXFS_LOG_TRACE EXTRA_CFLAGS += -DXFS_RW_TRACE - EXTRA_CFLAGS += -DPAGEBUF_TRACE + EXTRA_CFLAGS += -DXFS_BUF_TRACE EXTRA_CFLAGS += -DXFS_VNODE_TRACE endif diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index c64a29cdfff3..f0268a84e6fd 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -23,17 +23,8 @@ #include <linux/mm.h> /* - * memory management routines + * Process flags handling */ -#define KM_SLEEP 0x0001u -#define KM_NOSLEEP 0x0002u -#define KM_NOFS 0x0004u -#define KM_MAYFAIL 0x0008u - -#define kmem_zone kmem_cache -#define kmem_zone_t struct kmem_cache - -typedef unsigned long xfs_pflags_t; #define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO) #define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS) @@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t; *(NSTATEP) = *(OSTATEP); \ } while (0) -static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags) +/* + * General memory allocation interfaces + */ + +#define KM_SLEEP 0x0001u +#define KM_NOSLEEP 0x0002u +#define KM_NOFS 0x0004u +#define KM_MAYFAIL 
0x0008u + +/* + * We use a special process flag to avoid recursive callbacks into + * the filesystem during transactions. We will also issue our own + * warnings, so we explicitly skip any generic ones (silly of us). + */ +static inline gfp_t +kmem_flags_convert(unsigned int __nocast flags) { - gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */ + gfp_t lflags; -#ifdef DEBUG - if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { - printk(KERN_WARNING - "XFS: memory allocation with wrong flags (%x)\n", flags); - BUG(); - } -#endif + BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); if (flags & KM_NOSLEEP) { - lflags |= GFP_ATOMIC; + lflags = GFP_ATOMIC | __GFP_NOWARN; } else { - lflags |= GFP_KERNEL; - - /* avoid recusive callbacks to filesystem during transactions */ + lflags = GFP_KERNEL | __GFP_NOWARN; if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS)) lflags &= ~__GFP_FS; } - - return lflags; + return lflags; } -static __inline kmem_zone_t * +extern void *kmem_alloc(size_t, unsigned int __nocast); +extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); +extern void *kmem_zalloc(size_t, unsigned int __nocast); +extern void kmem_free(void *, size_t); + +/* + * Zone interfaces + */ + +#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN +#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT +#define KM_ZONE_SPREAD 0 + +#define kmem_zone kmem_cache +#define kmem_zone_t struct kmem_cache + +static inline kmem_zone_t * kmem_zone_init(int size, char *zone_name) { return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); } -static __inline void +static inline kmem_zone_t * +kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, + void (*construct)(void *, kmem_zone_t *, unsigned long)) +{ + return kmem_cache_create(zone_name, size, 0, flags, construct, NULL); +} + +static inline void kmem_zone_free(kmem_zone_t *zone, void *ptr) { kmem_cache_free(zone, ptr); } -static __inline void +static inline void kmem_zone_destroy(kmem_zone_t *zone) { if (zone && kmem_cache_destroy(zone)) BUG(); } -extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); +extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); -extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); -extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void kmem_free(void *, size_t); +/* + * Low memory cache shrinkers + */ typedef struct shrinker *kmem_shaker_t; typedef int (*kmem_shake_func_t)(int, gfp_t); -static __inline kmem_shaker_t +static inline kmem_shaker_t kmem_shake_register(kmem_shake_func_t sfunc) { return set_shrinker(DEFAULT_SEEKS, sfunc); } -static __inline void +static inline void kmem_shake_deregister(kmem_shaker_t shrinker) { remove_shrinker(shrinker); } -static __inline int +static inline int kmem_shake_allow(gfp_t gfp_mask) { return (gfp_mask & __GFP_WAIT); diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 74d8be87f983..97fc056130eb 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -43,7 +43,29 @@ #include <linux/pagevec.h> #include <linux/writeback.h> -STATIC void xfs_count_page_state(struct page *, int *, int *, int *); +STATIC void +xfs_count_page_state( + struct page *page, + int *delalloc, + int *unmapped, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unmapped = *unwritten = 0; + + bh = head = 
page_buffers(page); + do { + if (buffer_uptodate(bh) && !buffer_mapped(bh)) + (*unmapped) = 1; + else if (buffer_unwritten(bh) && !buffer_delay(bh)) + clear_buffer_unwritten(bh); + else if (buffer_unwritten(bh)) + (*unwritten) = 1; + else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); +} #if defined(XFS_RW_TRACE) void @@ -54,7 +76,7 @@ xfs_page_trace( int mask) { xfs_inode_t *ip; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); loff_t isize = i_size_read(inode); loff_t offset = page_offset(page); int delalloc = -1, unmapped = -1, unwritten = -1; @@ -81,7 +103,7 @@ xfs_page_trace( (void *)((unsigned long)delalloc), (void *)((unsigned long)unmapped), (void *)((unsigned long)unwritten), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL); } #else @@ -192,7 +214,7 @@ xfs_alloc_ioend( ioend->io_uptodate = 1; /* cleared if any I/O fails */ ioend->io_list = NULL; ioend->io_type = type; - ioend->io_vnode = LINVFS_GET_VP(inode); + ioend->io_vnode = vn_from_inode(inode); ioend->io_buffer_head = NULL; ioend->io_buffer_tail = NULL; atomic_inc(&ioend->io_vnode->v_iocount); @@ -217,7 +239,7 @@ xfs_map_blocks( xfs_iomap_t *mapp, int flags) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error, nmaps = 1; VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error); @@ -462,28 +484,37 @@ xfs_add_to_ioend( } STATIC void +xfs_map_buffer( + struct buffer_head *bh, + xfs_iomap_t *mp, + xfs_off_t offset, + uint block_bits) +{ + sector_t bn; + + ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL); + + bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) + + ((offset - mp->iomap_offset) >> block_bits); + + ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME)); + + bh->b_blocknr = bn; + set_buffer_mapped(bh); +} + +STATIC void xfs_map_at_offset( struct buffer_head *bh, loff_t offset, int block_bits, xfs_iomap_t *iomapp) { - xfs_daddr_t bn; - int sector_shift; - ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); - ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); - - sector_shift = block_bits - BBSHIFT; - bn = (iomapp->iomap_bn >> sector_shift) + - ((offset - iomapp->iomap_offset) >> block_bits); - - ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME)); - ASSERT((bn << sector_shift) >= iomapp->iomap_bn); lock_buffer(bh); - bh->b_blocknr = bn; + xfs_map_buffer(bh, iomapp, offset, block_bits); bh->b_bdev = iomapp->iomap_target->bt_bdev; set_buffer_mapped(bh); clear_buffer_delay(bh); @@ -616,7 +647,7 @@ xfs_is_delayed_page( acceptable = (type == IOMAP_UNWRITTEN); else if (buffer_delay(bh)) acceptable = (type == IOMAP_DELAY); - else if (buffer_mapped(bh)) + else if (buffer_dirty(bh) && buffer_mapped(bh)) acceptable = (type == 0); else break; @@ -1040,8 +1071,159 @@ error: return err; } +/* + * writepage: Called from one of two places: + * + * 1. we are flushing a delalloc buffer head. + * + * 2. we are writing out a dirty page. Typically the page dirty + * state is cleared before we get here. In this case is it + * conceivable we have no buffer heads. + * + * For delalloc space on the page we need to allocate space and + * flush it. For unmapped buffer heads on the page we should + * allocate space if the page is uptodate. For any other dirty + * buffer heads on the page we should flush them. 
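The new xfs_map_buffer() helper above centralizes disk-address arithmetic that the rest of this patch removes from xfs_map_at_offset() and __linvfs_get_block(): iomap_bn counts 512-byte basic blocks (BBSHIFT == 9), so it is first rescaled to filesystem blocks and then advanced by the buffer's offset within the mapping. A standalone model with invented values:

```c
#include <stdint.h>

#define BBSHIFT	9	/* 512-byte "basic blocks", as in XFS */

/* Userspace model of xfs_map_buffer()'s address math. */
static uint64_t map_blockno(uint64_t iomap_bn,		/* in basic blocks */
			    uint64_t offset,		/* byte offset     */
			    uint64_t iomap_offset,	/* mapping start   */
			    unsigned int block_bits)	/* log2(blocksize) */
{
	return (iomap_bn >> (block_bits - BBSHIFT)) +
	       ((offset - iomap_offset) >> block_bits);
}

/*
 * Invented values: 4 KiB blocks (block_bits = 12), iomap_bn = 8192
 * basic blocks, and a buffer 16384 bytes into the mapping:
 *   (8192 >> 3) + (16384 >> 12) = 1024 + 4 = 1028  -> bh->b_blocknr
 */
```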
+ * + * If we detect that a transaction would be required to flush + * the page, we have to check the process flags first, if we + * are already in a transaction or disk I/O during allocations + * is off, we need to fail the writepage and redirty the page. + */ + +STATIC int +xfs_vm_writepage( + struct page *page, + struct writeback_control *wbc) +{ + int error; + int need_trans; + int delalloc, unmapped, unwritten; + struct inode *inode = page->mapping->host; + + xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); + + /* + * We need a transaction if: + * 1. There are delalloc buffers on the page + * 2. The page is uptodate and we have unmapped buffers + * 3. The page is uptodate and we have no buffers + * 4. There are unwritten buffers on the page + */ + + if (!page_has_buffers(page)) { + unmapped = 1; + need_trans = 1; + } else { + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!PageUptodate(page)) + unmapped = 0; + need_trans = delalloc + unmapped + unwritten; + } + + /* + * If we need a transaction and the process flags say + * we are already in a transaction, or no IO is allowed + * then mark the page dirty again and leave the page + * as is. + */ + if (PFLAGS_TEST_FSTRANS() && need_trans) + goto out_fail; + + /* + * Delay hooking up buffer heads until we have + * made our go/no-go decision. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, 1 << inode->i_blkbits, 0); + + /* + * Convert delayed allocate, unwritten or unmapped space + * to real space and flush out to disk. + */ + error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); + if (error == -EAGAIN) + goto out_fail; + if (unlikely(error < 0)) + goto out_unlock; + + return 0; + +out_fail: + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; +out_unlock: + unlock_page(page); + return error; +} + +/* + * Called to move a page into cleanable state - and from there + * to be released. Possibly the page is already clean. We always + * have buffer heads in this call. + * + * Returns 0 if the page is ok to release, 1 otherwise. + * + * Possible scenarios are: + * + * 1. We are being called to release a page which has been written + * to via regular I/O. buffer heads will be dirty and possibly + * delalloc. If no delalloc buffer heads in this case then we + * can just return zero. + * + * 2. We are called to release a page which has been written via + * mmap, all we need to do is ensure there is no delalloc + * state in the buffer heads, if not we can let the caller + * free them and we should come back later via writepage. + */ STATIC int -__linvfs_get_block( +xfs_vm_releasepage( + struct page *page, + gfp_t gfp_mask) +{ + struct inode *inode = page->mapping->host; + int dirty, delalloc, unmapped, unwritten; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + }; + + xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); + + if (!page_has_buffers(page)) + return 0; + + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!delalloc && !unwritten) + goto free_buffers; + + if (!(gfp_mask & __GFP_FS)) + return 0; + + /* If we are already inside a transaction or the thread cannot + * do I/O, we cannot release this page. + */ + if (PFLAGS_TEST_FSTRANS()) + return 0; + + /* + * Convert delalloc space to real space, do not flush the + * data out to disk, that will be done by the caller. + * Never need to allocate space here - we will always + * come back to writepage in that case. 
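The four conditions in the writepage comment above boil down to one small decision: count the buffer states, then defer if flushing would need a transaction while one is already open. A sketch of that test, with the page state reduced to an invented plain struct rather than real page/buffer flags:

#include <stdio.h>

struct page_state {                     /* invented stand-in for page flags */
        int has_buffers;
        int uptodate;
        int delalloc, unmapped, unwritten;
        int in_fs_transaction;          /* models PFLAGS_TEST_FSTRANS() */
};

/* Return 1 if writepage must redirty and defer the page, 0 if it can
 * convert and write it now. */
static int must_defer(struct page_state *p)
{
        int need_trans;

        if (!p->has_buffers) {
                p->unmapped = 1;        /* no buffer heads: treat as unmapped */
                need_trans = 1;
        } else {
                if (!p->uptodate)
                        p->unmapped = 0;
                need_trans = p->delalloc + p->unmapped + p->unwritten;
        }
        return need_trans && p->in_fs_transaction;
}

int main(void)
{
        struct page_state clean = { .has_buffers = 1, .uptodate = 1 };
        struct page_state dela = { .has_buffers = 1, .uptodate = 1,
                                   .delalloc = 1, .in_fs_transaction = 1 };

        printf("clean page deferred: %d\n", must_defer(&clean));   /* 0 */
        printf("delalloc in trans:   %d\n", must_defer(&dela));    /* 1 */
        return 0;
}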
+ */ + dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); + if (dirty == 0 && !unwritten) + goto free_buffers; + return 0; + +free_buffers: + return try_to_free_buffers(page); +} + +STATIC int +__xfs_get_block( struct inode *inode, sector_t iblock, unsigned long blocks, @@ -1050,7 +1232,7 @@ __linvfs_get_block( int direct, bmapi_flags_t flags) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_iomap_t iomap; xfs_off_t offset; ssize_t size; @@ -1073,21 +1255,13 @@ __linvfs_get_block( return 0; if (iomap.iomap_bn != IOMAP_DADDR_NULL) { - xfs_daddr_t bn; - xfs_off_t delta; - - /* For unwritten extents do not report a disk address on + /* + * For unwritten extents do not report a disk address on * the read case (treat as if we're reading into a hole). */ if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) { - delta = offset - iomap.iomap_offset; - delta >>= inode->i_blkbits; - - bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT); - bn += delta; - BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME)); - bh_result->b_blocknr = bn; - set_buffer_mapped(bh_result); + xfs_map_buffer(bh_result, &iomap, offset, + inode->i_blkbits); } if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) { if (direct) @@ -1130,30 +1304,30 @@ __linvfs_get_block( } int -linvfs_get_block( +xfs_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - return __linvfs_get_block(inode, iblock, 0, bh_result, + return __xfs_get_block(inode, iblock, 0, bh_result, create, 0, BMAPI_WRITE); } STATIC int -linvfs_get_blocks_direct( +xfs_get_blocks_direct( struct inode *inode, sector_t iblock, unsigned long max_blocks, struct buffer_head *bh_result, int create) { - return __linvfs_get_block(inode, iblock, max_blocks, bh_result, + return __xfs_get_block(inode, iblock, max_blocks, bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); } STATIC void -linvfs_end_io_direct( +xfs_end_io_direct( struct kiocb *iocb, loff_t offset, ssize_t size, @@ -1191,7 +1365,7 @@ linvfs_end_io_direct( } STATIC ssize_t -linvfs_direct_IO( +xfs_vm_direct_IO( int rw, struct kiocb *iocb, const struct iovec *iov, @@ -1200,7 +1374,7 @@ linvfs_direct_IO( { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_iomap_t iomap; int maps = 1; int error; @@ -1215,164 +1389,61 @@ linvfs_direct_IO( ret = blockdev_direct_IO_own_locking(rw, iocb, inode, iomap.iomap_target->bt_bdev, iov, offset, nr_segs, - linvfs_get_blocks_direct, - linvfs_end_io_direct); + xfs_get_blocks_direct, + xfs_end_io_direct); if (unlikely(ret <= 0 && iocb->private)) xfs_destroy_ioend(iocb->private); return ret; } +STATIC int +xfs_vm_prepare_write( + struct file *file, + struct page *page, + unsigned int from, + unsigned int to) +{ + return block_prepare_write(page, from, to, xfs_get_block); +} STATIC sector_t -linvfs_bmap( +xfs_vm_bmap( struct address_space *mapping, sector_t block) { struct inode *inode = (struct inode *)mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; - vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); VOP_RWLOCK(vp, VRWLOCK_READ); VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); VOP_RWUNLOCK(vp, VRWLOCK_READ); - return generic_block_bmap(mapping, block, linvfs_get_block); + return generic_block_bmap(mapping, block, xfs_get_block); } STATIC int -linvfs_readpage( 
+xfs_vm_readpage( struct file *unused, struct page *page) { - return mpage_readpage(page, linvfs_get_block); + return mpage_readpage(page, xfs_get_block); } STATIC int -linvfs_readpages( +xfs_vm_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block); -} - -STATIC void -xfs_count_page_state( - struct page *page, - int *delalloc, - int *unmapped, - int *unwritten) -{ - struct buffer_head *bh, *head; - - *delalloc = *unmapped = *unwritten = 0; - - bh = head = page_buffers(page); - do { - if (buffer_uptodate(bh) && !buffer_mapped(bh)) - (*unmapped) = 1; - else if (buffer_unwritten(bh) && !buffer_delay(bh)) - clear_buffer_unwritten(bh); - else if (buffer_unwritten(bh)) - (*unwritten) = 1; - else if (buffer_delay(bh)) - (*delalloc) = 1; - } while ((bh = bh->b_this_page) != head); + return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); } - -/* - * writepage: Called from one of two places: - * - * 1. we are flushing a delalloc buffer head. - * - * 2. we are writing out a dirty page. Typically the page dirty - * state is cleared before we get here. In this case is it - * conceivable we have no buffer heads. - * - * For delalloc space on the page we need to allocate space and - * flush it. For unmapped buffer heads on the page we should - * allocate space if the page is uptodate. For any other dirty - * buffer heads on the page we should flush them. - * - * If we detect that a transaction would be required to flush - * the page, we have to check the process flags first, if we - * are already in a transaction or disk I/O during allocations - * is off, we need to fail the writepage and redirty the page. - */ - STATIC int -linvfs_writepage( - struct page *page, - struct writeback_control *wbc) -{ - int error; - int need_trans; - int delalloc, unmapped, unwritten; - struct inode *inode = page->mapping->host; - - xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); - - /* - * We need a transaction if: - * 1. There are delalloc buffers on the page - * 2. The page is uptodate and we have unmapped buffers - * 3. The page is uptodate and we have no buffers - * 4. There are unwritten buffers on the page - */ - - if (!page_has_buffers(page)) { - unmapped = 1; - need_trans = 1; - } else { - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!PageUptodate(page)) - unmapped = 0; - need_trans = delalloc + unmapped + unwritten; - } - - /* - * If we need a transaction and the process flags say - * we are already in a transaction, or no IO is allowed - * then mark the page dirty again and leave the page - * as is. - */ - if (PFLAGS_TEST_FSTRANS() && need_trans) - goto out_fail; - - /* - * Delay hooking up buffer heads until we have - * made our go/no-go decision. - */ - if (!page_has_buffers(page)) - create_empty_buffers(page, 1 << inode->i_blkbits, 0); - - /* - * Convert delayed allocate, unwritten or unmapped space - * to real space and flush out to disk. 
- */ - error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); - if (error == -EAGAIN) - goto out_fail; - if (unlikely(error < 0)) - goto out_unlock; - - return 0; - -out_fail: - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -out_unlock: - unlock_page(page); - return error; -} - -STATIC int -linvfs_invalidate_page( +xfs_vm_invalidatepage( struct page *page, unsigned long offset) { @@ -1381,87 +1452,16 @@ linvfs_invalidate_page( return block_invalidatepage(page, offset); } -/* - * Called to move a page into cleanable state - and from there - * to be released. Possibly the page is already clean. We always - * have buffer heads in this call. - * - * Returns 0 if the page is ok to release, 1 otherwise. - * - * Possible scenarios are: - * - * 1. We are being called to release a page which has been written - * to via regular I/O. buffer heads will be dirty and possibly - * delalloc. If no delalloc buffer heads in this case then we - * can just return zero. - * - * 2. We are called to release a page which has been written via - * mmap, all we need to do is ensure there is no delalloc - * state in the buffer heads, if not we can let the caller - * free them and we should come back later via writepage. - */ -STATIC int -linvfs_release_page( - struct page *page, - gfp_t gfp_mask) -{ - struct inode *inode = page->mapping->host; - int dirty, delalloc, unmapped, unwritten; - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 1, - }; - - xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); - - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!delalloc && !unwritten) - goto free_buffers; - - if (!(gfp_mask & __GFP_FS)) - return 0; - - /* If we are already inside a transaction or the thread cannot - * do I/O, we cannot release this page. - */ - if (PFLAGS_TEST_FSTRANS()) - return 0; - - /* - * Convert delalloc space to real space, do not flush the - * data out to disk, that will be done by the caller. - * Never need to allocate space here - we will always - * come back to writepage in that case. 
- */ - dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); - if (dirty == 0 && !unwritten) - goto free_buffers; - return 0; - -free_buffers: - return try_to_free_buffers(page); -} - -STATIC int -linvfs_prepare_write( - struct file *file, - struct page *page, - unsigned int from, - unsigned int to) -{ - return block_prepare_write(page, from, to, linvfs_get_block); -} - -struct address_space_operations linvfs_aops = { - .readpage = linvfs_readpage, - .readpages = linvfs_readpages, - .writepage = linvfs_writepage, +struct address_space_operations xfs_address_space_operations = { + .readpage = xfs_vm_readpage, + .readpages = xfs_vm_readpages, + .writepage = xfs_vm_writepage, .sync_page = block_sync_page, - .releasepage = linvfs_release_page, - .invalidatepage = linvfs_invalidate_page, - .prepare_write = linvfs_prepare_write, + .releasepage = xfs_vm_releasepage, + .invalidatepage = xfs_vm_invalidatepage, + .prepare_write = xfs_vm_prepare_write, .commit_write = generic_commit_write, - .bmap = linvfs_bmap, - .direct_IO = linvfs_direct_IO, + .bmap = xfs_vm_bmap, + .direct_IO = xfs_vm_direct_IO, .migratepage = buffer_migrate_page, }; diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 55339dd5a30d..795699f121d2 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -40,7 +40,7 @@ typedef struct xfs_ioend { struct work_struct io_work; /* xfsdatad work queue */ } xfs_ioend_t; -extern struct address_space_operations linvfs_aops; -extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); +extern struct address_space_operations xfs_address_space_operations; +extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int); #endif /* __XFS_IOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index bfb4f2917bb6..9fb0312665ca 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -29,6 +29,7 @@ #include <linux/blkdev.h> #include <linux/hash.h> #include <linux/kthread.h> +#include <linux/migrate.h> #include "xfs_linux.h" STATIC kmem_zone_t *xfs_buf_zone; @@ -1805,13 +1806,12 @@ xfs_flush_buftarg( int __init xfs_buf_init(void) { - int error = -ENOMEM; - #ifdef XFS_BUF_TRACE xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); #endif - xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); + xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", + KM_ZONE_HWALIGN, NULL); if (!xfs_buf_zone) goto out_free_trace_buf; @@ -1839,7 +1839,7 @@ xfs_buf_init(void) #ifdef XFS_BUF_TRACE ktrace_free(xfs_buf_trace_buf); #endif - return error; + return -ENOMEM; } void diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 80eb249f2fa0..b768ea910bbe 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -25,6 +25,8 @@ #include "xfs_mount.h" #include "xfs_export.h" +STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, }; + /* * XFS encodes and decodes the fileid portion of NFS filehandles * itself instead of letting the generic NFS code do it. 
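One small fix rolled into the xfs_buf.c hunk above: xfs_buf_init() no longer carries a dead "error" local and simply returns -ENOMEM from its unwind path. The goto ladder it keeps is the usual kernel init pattern; a userspace skeleton of the same shape, with the three resources reduced to plain mallocs:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *trace_buf, *buf_zone, *workqueue;

static int buf_init(void)
{
        trace_buf = malloc(64);
        if (!trace_buf)
                goto out;
        buf_zone = malloc(64);
        if (!buf_zone)
                goto out_free_trace_buf;
        workqueue = malloc(64);
        if (!workqueue)
                goto out_free_buf_zone;
        return 0;       /* success: objects live for the module lifetime */

out_free_buf_zone:
        free(buf_zone);
out_free_trace_buf:
        free(trace_buf);
out:
        return -ENOMEM; /* one error value, as in the new xfs_buf_init() */
}

int main(void)
{
        printf("buf_init: %d\n", buf_init());
        return 0;
}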
This @@ -37,7 +39,7 @@ */ STATIC struct dentry * -linvfs_decode_fh( +xfs_fs_decode_fh( struct super_block *sb, __u32 *fh, int fh_len, @@ -78,12 +80,12 @@ linvfs_decode_fh( } fh = (__u32 *)&ifid; - return find_exported_dentry(sb, fh, parent, acceptable, context); + return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context); } STATIC int -linvfs_encode_fh( +xfs_fs_encode_fh( struct dentry *dentry, __u32 *fh, int *max_len, @@ -95,7 +97,7 @@ linvfs_encode_fh( int len; int is64 = 0; #if XFS_BIG_INUMS - vfs_t *vfs = LINVFS_GET_VFS(inode->i_sb); + vfs_t *vfs = vfs_from_sb(inode->i_sb); if (!(vfs->vfs_flag & VFS_32BITINODES)) { /* filesystem may contain 64bit inode numbers */ @@ -130,21 +132,21 @@ linvfs_encode_fh( } STATIC struct dentry * -linvfs_get_dentry( +xfs_fs_get_dentry( struct super_block *sb, void *data) { vnode_t *vp; struct inode *inode; struct dentry *result; - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; VFS_VGET(vfsp, &vp, (fid_t *)data, error); if (error || vp == NULL) return ERR_PTR(-ESTALE) ; - inode = LINVFS_GET_IP(vp); + inode = vn_to_inode(vp); result = d_alloc_anon(inode); if (!result) { iput(inode); @@ -154,25 +156,20 @@ linvfs_get_dentry( } STATIC struct dentry * -linvfs_get_parent( +xfs_fs_get_parent( struct dentry *child) { int error; vnode_t *vp, *cvp; struct dentry *parent; - struct dentry dotdot; - - dotdot.d_name.name = ".."; - dotdot.d_name.len = 2; - dotdot.d_inode = NULL; cvp = NULL; - vp = LINVFS_GET_VP(child->d_inode); + vp = vn_from_inode(child->d_inode); VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error); if (unlikely(error)) return ERR_PTR(-error); - parent = d_alloc_anon(LINVFS_GET_IP(cvp)); + parent = d_alloc_anon(vn_to_inode(cvp)); if (unlikely(!parent)) { VN_RELE(cvp); return ERR_PTR(-ENOMEM); @@ -180,9 +177,9 @@ linvfs_get_parent( return parent; } -struct export_operations linvfs_export_ops = { - .decode_fh = linvfs_decode_fh, - .encode_fh = linvfs_encode_fh, - .get_parent = linvfs_get_parent, - .get_dentry = linvfs_get_dentry, +struct export_operations xfs_export_operations = { + .decode_fh = xfs_fs_decode_fh, + .encode_fh = xfs_fs_encode_fh, + .get_parent = xfs_fs_get_parent, + .get_dentry = xfs_fs_get_dentry, }; diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index ced4404339c7..185567a6a561 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -43,13 +43,13 @@ #include <linux/dcache.h> #include <linux/smp_lock.h> -static struct vm_operations_struct linvfs_file_vm_ops; +static struct vm_operations_struct xfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct linvfs_dmapi_file_vm_ops; +static struct vm_operations_struct xfs_dmapi_file_vm_ops; #endif STATIC inline ssize_t -__linvfs_read( +__xfs_file_read( struct kiocb *iocb, char __user *buf, int ioflags, @@ -58,7 +58,7 @@ __linvfs_read( { struct iovec iov = {buf, count}; struct file *file = iocb->ki_filp; - vnode_t *vp = LINVFS_GET_VP(file->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(file->f_dentry->d_inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -71,28 +71,28 @@ __linvfs_read( STATIC ssize_t -linvfs_aio_read( +xfs_file_aio_read( struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) { - return __linvfs_read(iocb, buf, IO_ISAIO, count, pos); + return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -linvfs_aio_read_invis( +xfs_file_aio_read_invis( struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) { - return 
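The file_operations entries being renamed in this file are thin wrappers that funnel into one worker carrying an ioflags argument (IO_ISAIO, IO_INVIS). The shape in miniature; the flag values and the worker below are invented, and the IO_INVIS behavior shown (skipping timestamp updates for DMAPI-style invisible I/O) is my reading of the flag's intent:

#include <stdio.h>

#define IO_ISAIO 0x1                    /* invented flag values */
#define IO_INVIS 0x2                    /* "invisible" I/O */

static long do_read(const char *buf, long count, int ioflags)
{
        (void)buf;
        if (ioflags & IO_INVIS)
                printf("invisible read: leave timestamps alone\n");
        return count;                   /* pretend everything was read */
}

static long file_read(const char *buf, long count)
{
        return do_read(buf, count, IO_ISAIO);
}

static long file_read_invis(const char *buf, long count)
{
        return do_read(buf, count, IO_ISAIO | IO_INVIS);
}

int main(void)
{
        char buf[16];

        printf("%ld\n", file_read(buf, sizeof(buf)));
        printf("%ld\n", file_read_invis(buf, sizeof(buf)));
        return 0;
}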
__linvfs_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); + return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__linvfs_write( +__xfs_file_write( struct kiocb *iocb, const char __user *buf, int ioflags, @@ -102,7 +102,7 @@ __linvfs_write( struct iovec iov = {(void __user *)buf, count}; struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -115,28 +115,28 @@ __linvfs_write( STATIC ssize_t -linvfs_aio_write( +xfs_file_aio_write( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __linvfs_write(iocb, buf, IO_ISAIO, count, pos); + return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -linvfs_aio_write_invis( +xfs_file_aio_write_invis( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); + return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__linvfs_readv( +__xfs_file_readv( struct file *file, const struct iovec *iov, int ioflags, @@ -144,8 +144,8 @@ __linvfs_readv( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); - struct kiocb kiocb; + vnode_t *vp = vn_from_inode(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -160,28 +160,28 @@ __linvfs_readv( } STATIC ssize_t -linvfs_readv( +xfs_file_readv( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_readv(file, iov, 0, nr_segs, ppos); + return __xfs_file_readv(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -linvfs_readv_invis( +xfs_file_readv_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos); + return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos); } STATIC inline ssize_t -__linvfs_writev( +__xfs_file_writev( struct file *file, const struct iovec *iov, int ioflags, @@ -189,8 +189,8 @@ __linvfs_writev( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); - struct kiocb kiocb; + vnode_t *vp = vn_from_inode(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -206,34 +206,34 @@ __linvfs_writev( STATIC ssize_t -linvfs_writev( +xfs_file_writev( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_writev(file, iov, 0, nr_segs, ppos); + return __xfs_file_writev(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -linvfs_writev_invis( +xfs_file_writev_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos); + return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos); } STATIC ssize_t -linvfs_sendfile( +xfs_file_sendfile( struct file *filp, loff_t *ppos, size_t count, read_actor_t actor, void *target) { - vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode); ssize_t rval; VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval); @@ -242,11 +242,11 @@ linvfs_sendfile( STATIC int -linvfs_open( +xfs_file_open( struct inode *inode, struct file *filp) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > 
MAX_NON_LFS) @@ -259,11 +259,11 @@ linvfs_open( STATIC int -linvfs_release( +xfs_file_release( struct inode *inode, struct file *filp) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0; if (vp) @@ -273,13 +273,13 @@ linvfs_release( STATIC int -linvfs_fsync( +xfs_file_fsync( struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; int flags = FSYNC_WAIT; @@ -292,7 +292,7 @@ linvfs_fsync( } /* - * linvfs_readdir maps to VOP_READDIR(). + * xfs_file_readdir maps to VOP_READDIR(). * We need to build a uio, cred, ... */ @@ -301,13 +301,13 @@ linvfs_fsync( #ifdef CONFIG_XFS_DMAPI STATIC struct page * -linvfs_filemap_nopage( +xfs_vm_nopage( struct vm_area_struct *area, unsigned long address, int *type) { struct inode *inode = area->vm_file->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error; @@ -324,7 +324,7 @@ linvfs_filemap_nopage( STATIC int -linvfs_readdir( +xfs_file_readdir( struct file *filp, void *dirent, filldir_t filldir) @@ -340,7 +340,7 @@ linvfs_readdir( xfs_off_t start_offset, curr_offset; xfs_dirent_t *dbp = NULL; - vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + vp = vn_from_inode(filp->f_dentry->d_inode); ASSERT(vp); /* Try fairly hard to get memory */ @@ -404,39 +404,40 @@ done: STATIC int -linvfs_file_mmap( +xfs_file_mmap( struct file *filp, struct vm_area_struct *vma) { struct inode *ip = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(ip); - vattr_t va = { .va_mask = XFS_AT_UPDATIME }; + vnode_t *vp = vn_from_inode(ip); + vattr_t vattr; int error; - vma->vm_ops = &linvfs_file_vm_ops; + vma->vm_ops = &xfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI if (vp->v_vfsp->vfs_flag & VFS_DMI) { - vma->vm_ops = &linvfs_dmapi_file_vm_ops; + vma->vm_ops = &xfs_dmapi_file_vm_ops; } #endif /* CONFIG_XFS_DMAPI */ - VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ + vattr.va_mask = XFS_AT_UPDATIME; + VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, &vattr); /* update flags */ return 0; } STATIC long -linvfs_ioctl( +xfs_file_ioctl( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = vn_from_inode(inode); VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error); VMODIFY(vp); @@ -451,14 +452,14 @@ linvfs_ioctl( } STATIC long -linvfs_ioctl_invis( +xfs_file_ioctl_invis( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = vn_from_inode(inode); ASSERT(vp); VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error); @@ -476,11 +477,11 @@ linvfs_ioctl_invis( #ifdef CONFIG_XFS_DMAPI #ifdef HAVE_VMOP_MPROTECT STATIC int -linvfs_mprotect( +xfs_vm_mprotect( struct vm_area_struct *vma, unsigned int newflags) { - vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode); int error = 0; if (vp->v_vfsp->vfs_flag & VFS_DMI) { @@ -503,10 +504,10 @@ linvfs_mprotect( * it back online. 
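The open path above keeps the pre-LFS guard: without O_LARGEFILE, a file larger than MAX_NON_LFS must not be opened. The hunk elides the return statement, so the -EFBIG below is an assumption on my part; MAX_NON_LFS is the real kernel value, the flag bit is a placeholder:

#include <errno.h>
#include <stdio.h>

#define MAX_NON_LFS ((1UL << 31) - 1)   /* 2 GiB - 1 */
#define O_LARGEFILE_BIT 0x8000          /* placeholder flag bit */

static int open_check(unsigned int f_flags, unsigned long long i_size)
{
        if (!(f_flags & O_LARGEFILE_BIT) && i_size > MAX_NON_LFS)
                return -EFBIG;          /* too big for a non-LFS opener */
        return 0;
}

int main(void)
{
        printf("%d\n", open_check(0, 3ULL << 30));                /* -EFBIG */
        printf("%d\n", open_check(O_LARGEFILE_BIT, 3ULL << 30));  /* 0 */
        return 0;
}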
*/ STATIC int -linvfs_open_exec( +xfs_file_open_exec( struct inode *inode) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error = 0; xfs_inode_t *ip; @@ -527,69 +528,69 @@ open_exec_out: } #endif /* HAVE_FOP_OPEN_EXEC */ -struct file_operations linvfs_file_operations = { +struct file_operations xfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = linvfs_readv, - .writev = linvfs_writev, - .aio_read = linvfs_aio_read, - .aio_write = linvfs_aio_write, - .sendfile = linvfs_sendfile, - .unlocked_ioctl = linvfs_ioctl, + .readv = xfs_file_readv, + .writev = xfs_file_writev, + .aio_read = xfs_file_aio_read, + .aio_write = xfs_file_aio_write, + .sendfile = xfs_file_sendfile, + .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_ioctl, + .compat_ioctl = xfs_file_compat_ioctl, #endif - .mmap = linvfs_file_mmap, - .open = linvfs_open, - .release = linvfs_release, - .fsync = linvfs_fsync, + .mmap = xfs_file_mmap, + .open = xfs_file_open, + .release = xfs_file_release, + .fsync = xfs_file_fsync, #ifdef HAVE_FOP_OPEN_EXEC - .open_exec = linvfs_open_exec, + .open_exec = xfs_file_open_exec, #endif }; -struct file_operations linvfs_invis_file_operations = { +struct file_operations xfs_invis_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = linvfs_readv_invis, - .writev = linvfs_writev_invis, - .aio_read = linvfs_aio_read_invis, - .aio_write = linvfs_aio_write_invis, - .sendfile = linvfs_sendfile, - .unlocked_ioctl = linvfs_ioctl_invis, + .readv = xfs_file_readv_invis, + .writev = xfs_file_writev_invis, + .aio_read = xfs_file_aio_read_invis, + .aio_write = xfs_file_aio_write_invis, + .sendfile = xfs_file_sendfile, + .unlocked_ioctl = xfs_file_ioctl_invis, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_invis_ioctl, + .compat_ioctl = xfs_file_compat_invis_ioctl, #endif - .mmap = linvfs_file_mmap, - .open = linvfs_open, - .release = linvfs_release, - .fsync = linvfs_fsync, + .mmap = xfs_file_mmap, + .open = xfs_file_open, + .release = xfs_file_release, + .fsync = xfs_file_fsync, }; -struct file_operations linvfs_dir_operations = { +struct file_operations xfs_dir_file_operations = { .read = generic_read_dir, - .readdir = linvfs_readdir, - .unlocked_ioctl = linvfs_ioctl, + .readdir = xfs_file_readdir, + .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_ioctl, + .compat_ioctl = xfs_file_compat_ioctl, #endif - .fsync = linvfs_fsync, + .fsync = xfs_file_fsync, }; -static struct vm_operations_struct linvfs_file_vm_ops = { +static struct vm_operations_struct xfs_file_vm_ops = { .nopage = filemap_nopage, .populate = filemap_populate, }; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { - .nopage = linvfs_filemap_nopage, +static struct vm_operations_struct xfs_dmapi_file_vm_ops = { + .nopage = xfs_vm_nopage, .populate = filemap_populate, #ifdef HAVE_VMOP_MPROTECT - .mprotect = linvfs_mprotect, + .mprotect = xfs_vm_mprotect, #endif }; #endif /* CONFIG_XFS_DMAPI */ diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 4fa4b1a5187e..575f2a790f31 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -57,7 +57,7 @@ fs_tosspages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if 
(VN_CACHED(vp)) truncate_inode_pages(ip->i_mapping, first); @@ -76,7 +76,7 @@ fs_flushinval_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if (VN_CACHED(vp)) { filemap_write_and_wait(ip->i_mapping); @@ -98,7 +98,7 @@ fs_flush_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if (VN_CACHED(vp)) { filemap_fdatawrite(ip->i_mapping); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 4db47790415c..84478491609b 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -138,7 +138,7 @@ xfs_find_handle( } /* we need the vnode */ - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); /* now we can grab the fsid */ memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); @@ -256,7 +256,7 @@ xfs_vget_fsop_handlereq( } vpp = XFS_ITOV(ip); - inodep = LINVFS_GET_IP(vpp); + inodep = vn_to_inode(vpp); xfs_iunlock(ip, XFS_ILOCK_SHARED); *vp = vpp; @@ -344,7 +344,7 @@ xfs_open_by_handle( return -XFS_ERROR(-PTR_ERR(filp)); } if (inode->i_mode & S_IFREG) - filp->f_op = &linvfs_invis_file_operations; + filp->f_op = &xfs_invis_file_operations; fd_install(new_fd, filp); return new_fd; @@ -715,7 +715,7 @@ xfs_ioctl( xfs_inode_t *ip; xfs_mount_t *mp; - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); @@ -1160,105 +1160,129 @@ xfs_ioc_xattr( void __user *arg) { struct fsxattr fa; - vattr_t va; - int error; + struct vattr *vattr; + int error = 0; int attr_flags; unsigned int flags; + vattr = kmalloc(sizeof(*vattr), GFP_KERNEL); + if (unlikely(!vattr)) + return -ENOMEM; + switch (cmd) { case XFS_IOC_FSGETXATTR: { - va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ - XFS_AT_NEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (error) - return -error; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_NEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (unlikely(error)) { + error = -error; + break; + } - fa.fsx_xflags = va.va_xflags; - fa.fsx_extsize = va.va_extsize; - fa.fsx_nextents = va.va_nextents; - fa.fsx_projid = va.va_projid; + fa.fsx_xflags = vattr->va_xflags; + fa.fsx_extsize = vattr->va_extsize; + fa.fsx_nextents = vattr->va_nextents; + fa.fsx_projid = vattr->va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) - return -XFS_ERROR(EFAULT); - return 0; + if (copy_to_user(arg, &fa, sizeof(fa))) { + error = -EFAULT; + break; + } + break; } case XFS_IOC_FSSETXATTR: { - if (copy_from_user(&fa, arg, sizeof(fa))) - return -XFS_ERROR(EFAULT); + if (copy_from_user(&fa, arg, sizeof(fa))) { + error = -EFAULT; + break; + } attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; - va.va_xflags = fa.fsx_xflags; - va.va_extsize = fa.fsx_extsize; - va.va_projid = fa.fsx_projid; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; + vattr->va_xflags = fa.fsx_xflags; + vattr->va_extsize = fa.fsx_extsize; + vattr->va_projid = fa.fsx_projid; - VOP_SETATTR(vp, &va, attr_flags, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ - return -error; + VOP_SETATTR(vp, vattr, attr_flags, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, vattr); /* update flags */ + error = -error; + break; } case XFS_IOC_FSGETXATTRA: { - va.va_mask = XFS_AT_XFLAGS | 
XFS_AT_EXTSIZE | \ - XFS_AT_ANEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (error) - return -error; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_ANEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (unlikely(error)) { + error = -error; + break; + } - fa.fsx_xflags = va.va_xflags; - fa.fsx_extsize = va.va_extsize; - fa.fsx_nextents = va.va_anextents; - fa.fsx_projid = va.va_projid; + fa.fsx_xflags = vattr->va_xflags; + fa.fsx_extsize = vattr->va_extsize; + fa.fsx_nextents = vattr->va_anextents; + fa.fsx_projid = vattr->va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) - return -XFS_ERROR(EFAULT); - return 0; + if (copy_to_user(arg, &fa, sizeof(fa))) { + error = -EFAULT; + break; + } + break; } case XFS_IOC_GETXFLAGS: { flags = xfs_di2lxflags(ip->i_d.di_flags); if (copy_to_user(arg, &flags, sizeof(flags))) - return -XFS_ERROR(EFAULT); - return 0; + error = -EFAULT; + break; } case XFS_IOC_SETXFLAGS: { - if (copy_from_user(&flags, arg, sizeof(flags))) - return -XFS_ERROR(EFAULT); + if (copy_from_user(&flags, arg, sizeof(flags))) { + error = -EFAULT; + break; + } if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \ LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \ - LINUX_XFLAG_SYNC)) - return -XFS_ERROR(EOPNOTSUPP); + LINUX_XFLAG_SYNC)) { + error = -EOPNOTSUPP; + break; + } attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - va.va_mask = XFS_AT_XFLAGS; - va.va_xflags = xfs_merge_ioc_xflags(flags, - xfs_ip2xflags(ip)); + vattr->va_mask = XFS_AT_XFLAGS; + vattr->va_xflags = xfs_merge_ioc_xflags(flags, + xfs_ip2xflags(ip)); - VOP_SETATTR(vp, &va, attr_flags, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ - return -error; + VOP_SETATTR(vp, vattr, attr_flags, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, vattr); /* update flags */ + error = -error; + break; } case XFS_IOC_GETVERSION: { - flags = LINVFS_GET_IP(vp)->i_generation; + flags = vn_to_inode(vp)->i_generation; if (copy_to_user(arg, &flags, sizeof(flags))) - return -XFS_ERROR(EFAULT); - return 0; + error = -EFAULT; + break; } default: - return -ENOTTY; + error = -ENOTTY; + break; } + + kfree(vattr); + return error; } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index a7c9ba1a9f7b..b6321abd9a81 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -107,11 +107,11 @@ xfs_ioctl32_bulkstat( #endif STATIC long -__linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) +xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) { int error; struct inode *inode = f->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_to_inode(inode); switch (cmd) { case XFS_IOC_DIOINFO: @@ -196,19 +196,19 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) } long -linvfs_compat_ioctl( +xfs_file_compat_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return __linvfs_compat_ioctl(0, f, cmd, arg); + return xfs_compat_ioctl(0, f, cmd, arg); } long -linvfs_compat_invis_ioctl( +xfs_file_compat_invis_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return __linvfs_compat_ioctl(IO_INVIS, f, cmd, arg); + return xfs_compat_ioctl(IO_INVIS, f, cmd, arg); } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 011c273bec50..02de6e62ee37 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ 
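The xfs_ioc_xattr() rework above moves the vattr from the stack to kmalloc() and, so that the allocation cannot leak, converts every early return inside the switch into a break that falls through to a single kfree(). The control-flow shape, with invented command numbers and payload struct:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct big_attrs { char blob[256]; };   /* stand-in for the grown vattr */

static int ioc_xattr(int cmd)
{
        struct big_attrs *attrs;
        int error = 0;

        attrs = malloc(sizeof(*attrs)); /* was a stack variable */
        if (!attrs)
                return -ENOMEM;

        switch (cmd) {
        case 1:
                error = -EFAULT;        /* early "return" would leak attrs */
                break;
        case 2:
                error = 0;              /* the success path also falls out */
                break;
        default:
                error = -ENOTTY;
                break;
        }

        free(attrs);                    /* single exit, always reached */
        return error;
}

int main(void)
{
        printf("%d %d %d\n", ioc_xattr(1), ioc_xattr(2), ioc_xattr(99));
        return 0;
}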
b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -18,7 +18,7 @@ #ifndef __XFS_IOCTL32_H__ #define __XFS_IOCTL32_H__ -extern long linvfs_compat_ioctl(struct file *, unsigned, unsigned long); -extern long linvfs_compat_invis_ioctl(struct file *f, unsigned, unsigned long); +extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long); +extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long); #endif /* __XFS_IOCTL32_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index d7f6f2d8ac8e..af487437bd7e 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -106,7 +106,7 @@ xfs_ichgtime( xfs_inode_t *ip, int flags) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); timespec_t tv; nanotime(&tv); @@ -198,22 +198,22 @@ xfs_ichgtime_fast( * Pull the link count and size up from the xfs inode to the linux inode */ STATIC void -validate_fields( - struct inode *ip) +xfs_validate_fields( + struct inode *ip, + struct vattr *vattr) { - vnode_t *vp = LINVFS_GET_VP(ip); - vattr_t va; + vnode_t *vp = vn_from_inode(ip); int error; - va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; - VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); - if (likely(!error)) { - ip->i_nlink = va.va_nlink; - ip->i_blocks = va.va_nblocks; + vattr->va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; + VOP_GETATTR(vp, vattr, ATTR_LAZY, NULL, error); + if (likely(!error)) { + ip->i_nlink = vattr->va_nlink; + ip->i_blocks = vattr->va_nblocks; - /* we're under i_mutex so i_size can't change under us */ - if (i_size_read(ip) != va.va_size) - i_size_write(ip, va.va_size); + /* we're under i_sem so i_size can't change under us */ + if (i_size_read(ip) != vattr->va_size) + i_size_write(ip, vattr->va_size); } } @@ -224,11 +224,11 @@ validate_fields( * inode, of course, such that log replay can't cause these to be lost). */ STATIC int -linvfs_init_security( +xfs_init_security( struct vnode *vp, struct inode *dir) { - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); size_t length; void *value; char *name; @@ -257,46 +257,46 @@ linvfs_init_security( * XXX(hch): nfsd is broken, better fix it instead. */ STATIC inline int -has_fs_struct(struct task_struct *task) +xfs_has_fs_struct(struct task_struct *task) { return (task->fs != init_task.fs); } STATIC inline void -cleanup_inode( +xfs_cleanup_inode( vnode_t *dvp, vnode_t *vp, - struct dentry *dentry, + struct dentry *dentry, int mode) { struct dentry teardown = {}; - int err2; + int error; /* Oh, the horror. - * If we can't add the ACL or we fail in - * linvfs_init_security we must back out. + * If we can't add the ACL or we fail in + * xfs_init_security we must back out. * ENOSPC can hit here, among other things. 
*/ - teardown.d_inode = LINVFS_GET_IP(vp); + teardown.d_inode = vn_to_inode(vp); teardown.d_name = dentry->d_name; if (S_ISDIR(mode)) - VOP_RMDIR(dvp, &teardown, NULL, err2); + VOP_RMDIR(dvp, &teardown, NULL, error); else - VOP_REMOVE(dvp, &teardown, NULL, err2); + VOP_REMOVE(dvp, &teardown, NULL, error); VN_RELE(vp); } STATIC int -linvfs_mknod( +xfs_vn_mknod( struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *ip; - vattr_t va; - vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); + vattr_t vattr = { 0 }; + vnode_t *vp = NULL, *dvp = vn_from_inode(dir); xfs_acl_t *default_acl = NULL; attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS; int error; @@ -305,99 +305,98 @@ linvfs_mknod( * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. */ - if (!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff) + if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) return -EINVAL; - if (test_default_acl && test_default_acl(dvp)) { - if (!_ACL_ALLOC(default_acl)) + if (unlikely(test_default_acl && test_default_acl(dvp))) { + if (!_ACL_ALLOC(default_acl)) { return -ENOMEM; + } if (!_ACL_GET_DEFAULT(dvp, default_acl)) { _ACL_FREE(default_acl); default_acl = NULL; } } - if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current)) + if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current)) mode &= ~current->fs->umask; - memset(&va, 0, sizeof(va)); - va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; - va.va_mode = mode; + vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + vattr.va_mode = mode; switch (mode & S_IFMT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: - va.va_rdev = sysv_encode_dev(rdev); - va.va_mask |= XFS_AT_RDEV; + vattr.va_rdev = sysv_encode_dev(rdev); + vattr.va_mask |= XFS_AT_RDEV; /*FALLTHROUGH*/ case S_IFREG: - VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); + VOP_CREATE(dvp, dentry, &vattr, &vp, NULL, error); break; case S_IFDIR: - VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); + VOP_MKDIR(dvp, dentry, &vattr, &vp, NULL, error); break; default: error = EINVAL; break; } - if (!error) - { - error = linvfs_init_security(vp, dir); + if (unlikely(!error)) { + error = xfs_init_security(vp, dir); if (error) - cleanup_inode(dvp, vp, dentry, mode); + xfs_cleanup_inode(dvp, vp, dentry, mode); } - if (default_acl) { + if (unlikely(default_acl)) { if (!error) { - error = _ACL_INHERIT(vp, &va, default_acl); - if (!error) + error = _ACL_INHERIT(vp, &vattr, default_acl); + if (!error) VMODIFY(vp); else - cleanup_inode(dvp, vp, dentry, mode); + xfs_cleanup_inode(dvp, vp, dentry, mode); } _ACL_FREE(default_acl); } - if (!error) { + if (likely(!error)) { ASSERT(vp); - ip = LINVFS_GET_IP(vp); + ip = vn_to_inode(vp); if (S_ISCHR(mode) || S_ISBLK(mode)) ip->i_rdev = rdev; else if (S_ISDIR(mode)) - validate_fields(ip); + xfs_validate_fields(ip, &vattr); d_instantiate(dentry, ip); - validate_fields(dir); + xfs_validate_fields(dir, &vattr); } return -error; } STATIC int -linvfs_create( +xfs_vn_create( struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { - return linvfs_mknod(dir, dentry, mode, 0); + return xfs_vn_mknod(dir, dentry, mode, 0); } STATIC int -linvfs_mkdir( +xfs_vn_mkdir( struct inode *dir, struct dentry *dentry, int mode) { - return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); + return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * -linvfs_lookup( +xfs_vn_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - struct vnode *vp = LINVFS_GET_VP(dir), *cvp; + struct vnode 
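xfs_vn_mknod() above commits the on-disk create first, then applies security attributes and any default ACL; if a follow-up step fails, xfs_cleanup_inode() removes the half-built inode through a throw-away dentry. The same create/attach/rollback sequence in a simulated sketch; nothing here touches a real filesystem:

#include <stdio.h>

static int fail_security;       /* toggle to simulate init_security failing */

static int do_create(void)      { printf("create\n"); return 0; }
static int init_security(void)  { return fail_security ? -1 : 0; }
static void cleanup_inode(void) { printf("rollback: remove inode\n"); }

static int mknod_like(void)
{
        int error = do_create();
        if (error)
                return error;

        error = init_security();
        if (error) {
                cleanup_inode();        /* undo the committed create */
                return error;
        }
        printf("instantiate dentry\n");
        return 0;
}

int main(void)
{
        mknod_like();                   /* create + instantiate */
        fail_security = 1;
        mknod_like();                   /* create + rollback */
        return 0;
}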
*vp = vn_from_inode(dir), *cvp; int error; if (dentry->d_name.len >= MAXNAMELEN) @@ -411,11 +410,11 @@ linvfs_lookup( return NULL; } - return d_splice_alias(LINVFS_GET_IP(cvp), dentry); + return d_splice_alias(vn_to_inode(cvp), dentry); } STATIC int -linvfs_link( +xfs_vn_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) @@ -423,99 +422,102 @@ linvfs_link( struct inode *ip; /* inode of guy being linked to */ vnode_t *tdvp; /* target directory for new name/link */ vnode_t *vp; /* vp of name being linked */ + vattr_t vattr; int error; ip = old_dentry->d_inode; /* inode being linked to */ if (S_ISDIR(ip->i_mode)) return -EPERM; - tdvp = LINVFS_GET_VP(dir); - vp = LINVFS_GET_VP(ip); + tdvp = vn_from_inode(dir); + vp = vn_from_inode(ip); VOP_LINK(tdvp, vp, dentry, NULL, error); - if (!error) { + if (likely(!error)) { VMODIFY(tdvp); VN_HOLD(vp); - validate_fields(ip); + xfs_validate_fields(ip, &vattr); d_instantiate(dentry, ip); } return -error; } STATIC int -linvfs_unlink( +xfs_vn_unlink( struct inode *dir, struct dentry *dentry) { struct inode *inode; vnode_t *dvp; /* directory containing name to remove */ + vattr_t vattr; int error; inode = dentry->d_inode; - dvp = LINVFS_GET_VP(dir); + dvp = vn_from_inode(dir); VOP_REMOVE(dvp, dentry, NULL, error); - if (!error) { - validate_fields(dir); /* For size only */ - validate_fields(inode); + if (likely(!error)) { + xfs_validate_fields(dir, &vattr); /* size needs update */ + xfs_validate_fields(inode, &vattr); } - return -error; } STATIC int -linvfs_symlink( +xfs_vn_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *ip; - vattr_t va; + vattr_t vattr = { 0 }; vnode_t *dvp; /* directory containing name of symlink */ vnode_t *cvp; /* used to lookup symlink to put in dentry */ int error; - dvp = LINVFS_GET_VP(dir); + dvp = vn_from_inode(dir); cvp = NULL; - memset(&va, 0, sizeof(va)); - va.va_mode = S_IFLNK | + vattr.va_mode = S_IFLNK | (irix_symlink_mode ? 
0777 & ~current->fs->umask : S_IRWXUGO); - va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; error = 0; - VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); + VOP_SYMLINK(dvp, dentry, &vattr, (char *)symname, &cvp, NULL, error); if (likely(!error && cvp)) { - error = linvfs_init_security(cvp, dir); + error = xfs_init_security(cvp, dir); if (likely(!error)) { - ip = LINVFS_GET_IP(cvp); + ip = vn_to_inode(cvp); d_instantiate(dentry, ip); - validate_fields(dir); - validate_fields(ip); + xfs_validate_fields(dir, &vattr); + xfs_validate_fields(ip, &vattr); + } else { + xfs_cleanup_inode(dvp, cvp, dentry, 0); } } return -error; } STATIC int -linvfs_rmdir( +xfs_vn_rmdir( struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; - vnode_t *dvp = LINVFS_GET_VP(dir); + vnode_t *dvp = vn_from_inode(dir); + vattr_t vattr; int error; VOP_RMDIR(dvp, dentry, NULL, error); - if (!error) { - validate_fields(inode); - validate_fields(dir); + if (likely(!error)) { + xfs_validate_fields(inode, &vattr); + xfs_validate_fields(dir, &vattr); } return -error; } STATIC int -linvfs_rename( +xfs_vn_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, @@ -524,22 +526,21 @@ linvfs_rename( struct inode *new_inode = ndentry->d_inode; vnode_t *fvp; /* from directory */ vnode_t *tvp; /* target directory */ + vattr_t vattr; int error; - fvp = LINVFS_GET_VP(odir); - tvp = LINVFS_GET_VP(ndir); + fvp = vn_from_inode(odir); + tvp = vn_from_inode(ndir); VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); - if (error) - return -error; - - if (new_inode) - validate_fields(new_inode); - - validate_fields(odir); - if (ndir != odir) - validate_fields(ndir); - return 0; + if (likely(!error)) { + if (new_inode) + xfs_validate_fields(new_inode, &vattr); + xfs_validate_fields(odir, &vattr); + if (ndir != odir) + xfs_validate_fields(ndir, &vattr); + } + return -error; } /* @@ -548,7 +549,7 @@ linvfs_rename( * uio is kmalloced for this reason... 
*/ STATIC void * -linvfs_follow_link( +xfs_vn_follow_link( struct dentry *dentry, struct nameidata *nd) { @@ -574,7 +575,7 @@ linvfs_follow_link( return NULL; } - vp = LINVFS_GET_VP(dentry->d_inode); + vp = vn_from_inode(dentry->d_inode); iov.iov_base = link; iov.iov_len = MAXPATHLEN; @@ -599,7 +600,7 @@ linvfs_follow_link( } STATIC void -linvfs_put_link( +xfs_vn_put_link( struct dentry *dentry, struct nameidata *nd, void *p) @@ -612,12 +613,12 @@ linvfs_put_link( #ifdef CONFIG_XFS_POSIX_ACL STATIC int -linvfs_permission( +xfs_vn_permission( struct inode *inode, int mode, struct nameidata *nd) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; mode <<= 6; /* convert from linux to vnode access bits */ @@ -625,17 +626,17 @@ linvfs_permission( return -error; } #else -#define linvfs_permission NULL +#define xfs_vn_permission NULL #endif STATIC int -linvfs_getattr( +xfs_vn_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0; if (unlikely(vp->v_flag & VMODIFIED)) @@ -646,18 +647,17 @@ linvfs_getattr( } STATIC int -linvfs_setattr( +xfs_vn_setattr( struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; unsigned int ia_valid = attr->ia_valid; - vnode_t *vp = LINVFS_GET_VP(inode); - vattr_t vattr; + vnode_t *vp = vn_from_inode(inode); + vattr_t vattr = { 0 }; int flags = 0; int error; - memset(&vattr, 0, sizeof(vattr_t)); if (ia_valid & ATTR_UID) { vattr.va_mask |= XFS_AT_UID; vattr.va_uid = attr->ia_uid; @@ -699,28 +699,27 @@ linvfs_setattr( #endif VOP_SETATTR(vp, &vattr, flags, NULL, error); - if (error) - return -error; - vn_revalidate(vp); - return error; + if (likely(!error)) + __vn_revalidate(vp, &vattr); + return -error; } STATIC void -linvfs_truncate( +xfs_vn_truncate( struct inode *inode) { - block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); + block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block); } STATIC int -linvfs_setxattr( +xfs_vn_setxattr( struct dentry *dentry, const char *name, const void *data, size_t size, int flags) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -744,13 +743,13 @@ linvfs_setxattr( } STATIC ssize_t -linvfs_getxattr( +xfs_vn_getxattr( struct dentry *dentry, const char *name, void *data, size_t size) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -774,12 +773,12 @@ linvfs_getxattr( } STATIC ssize_t -linvfs_listxattr( +xfs_vn_listxattr( struct dentry *dentry, char *data, size_t size) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); int error, xflags = ATTR_KERNAMELS; ssize_t result; @@ -794,11 +793,11 @@ linvfs_listxattr( } STATIC int -linvfs_removexattr( +xfs_vn_removexattr( struct dentry *dentry, const char *name) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -816,45 +815,45 @@ linvfs_removexattr( } -struct inode_operations linvfs_file_inode_operations = { - .permission = linvfs_permission, - .truncate = linvfs_truncate, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - 
.getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, +struct inode_operations xfs_inode_operations = { + .permission = xfs_vn_permission, + .truncate = xfs_vn_truncate, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; -struct inode_operations linvfs_dir_inode_operations = { - .create = linvfs_create, - .lookup = linvfs_lookup, - .link = linvfs_link, - .unlink = linvfs_unlink, - .symlink = linvfs_symlink, - .mkdir = linvfs_mkdir, - .rmdir = linvfs_rmdir, - .mknod = linvfs_mknod, - .rename = linvfs_rename, - .permission = linvfs_permission, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - .getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, +struct inode_operations xfs_dir_inode_operations = { + .create = xfs_vn_create, + .lookup = xfs_vn_lookup, + .link = xfs_vn_link, + .unlink = xfs_vn_unlink, + .symlink = xfs_vn_symlink, + .mkdir = xfs_vn_mkdir, + .rmdir = xfs_vn_rmdir, + .mknod = xfs_vn_mknod, + .rename = xfs_vn_rename, + .permission = xfs_vn_permission, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; -struct inode_operations linvfs_symlink_inode_operations = { +struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, - .follow_link = linvfs_follow_link, - .put_link = linvfs_put_link, - .permission = linvfs_permission, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - .getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, + .follow_link = xfs_vn_follow_link, + .put_link = xfs_vn_put_link, + .permission = xfs_vn_permission, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h index 6899a6b4a50a..a8417d7af5f9 100644 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ b/fs/xfs/linux-2.6/xfs_iops.h @@ -18,13 +18,13 @@ #ifndef __XFS_IOPS_H__ #define __XFS_IOPS_H__ -extern struct inode_operations linvfs_file_inode_operations; -extern struct inode_operations linvfs_dir_inode_operations; -extern struct inode_operations linvfs_symlink_inode_operations; +extern struct inode_operations xfs_inode_operations; +extern struct inode_operations xfs_dir_inode_operations; +extern struct inode_operations xfs_symlink_inode_operations; -extern struct file_operations linvfs_file_operations; -extern struct file_operations linvfs_invis_file_operations; -extern struct file_operations linvfs_dir_operations; +extern struct file_operations xfs_file_operations; +extern struct file_operations xfs_dir_file_operations; +extern struct file_operations xfs_invis_file_operations; extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, int, unsigned int, void __user *); diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 67389b745526..1fe09f2d6519 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -73,6 +73,9 @@ #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/sort.h> +#include <linux/cpu.h> +#include 
<linux/notifier.h> +#include <linux/delay.h> #include <asm/page.h> #include <asm/div64.h> @@ -100,6 +103,11 @@ */ #undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ #define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */ +#ifdef CONFIG_SMP +#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#else +#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#endif /* * State flag for unwritten extent buffers. @@ -226,7 +234,7 @@ BUFFER_FNS(PrivateStart, unwritten); #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) #define xfs_stack_trace() dump_stack() #define xfs_itruncate_data(ip, off) \ - (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) + (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) #define xfs_statvfs_fsid(statp, mp) \ ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ __kernel_fsid_t *fsid = &(statp)->f_fsid; \ diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index e0ab45fbfebd..0169360475c4 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -83,7 +83,7 @@ xfs_rw_enter_trace( (void *)((unsigned long)ioflags), (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), (void *)((unsigned long)(io->io_new_size & 0xffffffff)), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL, (void *)NULL, (void *)NULL, @@ -113,7 +113,7 @@ xfs_inval_cached_trace( (void *)((unsigned long)(first & 0xffffffff)), (void *)((unsigned long)((last >> 32) & 0xffffffff)), (void *)((unsigned long)(last & 0xffffffff)), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL, (void *)NULL, (void *)NULL, @@ -249,9 +249,8 @@ xfs_read( if (n < size) size = n; - if (XFS_FORCED_SHUTDOWN(mp)) { + if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - } if (unlikely(ioflags & IO_ISDIRECT)) mutex_lock(&inode->i_mutex); @@ -267,10 +266,14 @@ xfs_read( dmflags, &locktype); if (ret) { xfs_iunlock(ip, XFS_IOLOCK_SHARED); - goto unlock_isem; + goto unlock_mutex; } } + if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp))) + VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)), + -1, FI_REMAPF_LOCKED); + xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, (void *)iovp, segs, *offset, ioflags); ret = __generic_file_aio_read(iocb, iovp, segs, offset); @@ -281,7 +284,7 @@ xfs_read( xfs_iunlock(ip, XFS_IOLOCK_SHARED); -unlock_isem: +unlock_mutex: if (unlikely(ioflags & IO_ISDIRECT)) mutex_unlock(&inode->i_mutex); return ret; @@ -432,7 +435,7 @@ xfs_zero_eof( xfs_fsize_t isize, /* current inode size */ xfs_fsize_t end_size) /* terminal inode size */ { - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); xfs_fileoff_t start_zero_fsb; xfs_fileoff_t end_zero_fsb; xfs_fileoff_t zero_count_fsb; @@ -573,7 +576,7 @@ xfs_write( vrwlock_t locktype; size_t ocount = 0, count; loff_t pos; - int need_isem = 1, need_flush = 0; + int need_i_mutex = 1, need_flush = 0; XFS_STATS_INC(xs_write_calls); @@ -622,14 +625,14 @@ xfs_write( return XFS_ERROR(-EINVAL); if (!VN_CACHED(vp) && pos < i_size_read(inode)) - need_isem = 0; + need_i_mutex = 0; if (VN_CACHED(vp)) need_flush = 1; } relock: - if (need_isem) { + if (need_i_mutex) { iolock = XFS_IOLOCK_EXCL; locktype = VRWLOCK_WRITE; @@ -651,7 +654,7 @@ start: S_ISBLK(inode->i_mode)); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } new_size = pos + count; @@ -663,7 +666,7 @@ start: loff_t savedsize = pos; int dmflags = FILP_DELAY_FLAG(file); - if (need_isem) + if (need_i_mutex) 
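New in the xfs_read() hunk above: a direct read now flushes and invalidates any cached pages first, so O_DIRECT data comes from disk instead of racing dirty page-cache contents. The ordering, reduced to stubs (all functions and the cached flag are simulated):

#include <stdio.h>

static int vn_cached = 1;       /* models VN_CACHED(vp) */

static void flushinval_pages(void)
{
        printf("write back and invalidate cached pages\n");
        vn_cached = 0;
}

static void direct_read(void)
{
        if (vn_cached)          /* new in this patch: only when needed */
                flushinval_pages();
        printf("read from disk, bypassing the cache\n");
}

int main(void)
{
        direct_read();
        return 0;
}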
dmflags |= DM_FLAGS_IMUX; xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ -672,7 +675,7 @@ start: dmflags, &locktype); if (error) { xfs_iunlock(xip, iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } xfs_ilock(xip, XFS_ILOCK_EXCL); eventsent = 1; @@ -710,7 +713,7 @@ start: isize, pos + count); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } } xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ -731,7 +734,7 @@ start: error = -remove_suid(file->f_dentry); if (unlikely(error)) { xfs_iunlock(xip, iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } } @@ -747,14 +750,14 @@ retry: -1, FI_REMAPF_LOCKED); } - if (need_isem) { + if (need_i_mutex) { /* demote the lock now the cached pages are gone */ XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); mutex_unlock(&inode->i_mutex); iolock = XFS_IOLOCK_SHARED; locktype = VRWLOCK_WRITE_DIRECT; - need_isem = 0; + need_i_mutex = 0; } xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, @@ -772,7 +775,7 @@ retry: pos += ret; count -= ret; - need_isem = 1; + need_i_mutex = 1; ioflags &= ~IO_ISDIRECT; xfs_iunlock(xip, iolock); goto relock; @@ -794,14 +797,14 @@ retry: !(ioflags & IO_INVIS)) { xfs_rwunlock(bdp, locktype); - if (need_isem) + if (need_i_mutex) mutex_unlock(&inode->i_mutex); error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp, DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ if (error) goto out_nounlocks; - if (need_isem) + if (need_i_mutex) mutex_lock(&inode->i_mutex); xfs_rwlock(bdp, locktype); pos = xip->i_d.di_size; @@ -905,9 +908,9 @@ retry: if (error) goto out_unlock_internal; } - + xfs_rwunlock(bdp, locktype); - if (need_isem) + if (need_i_mutex) mutex_unlock(&inode->i_mutex); error = sync_page_range(inode, mapping, pos, ret); @@ -918,8 +921,8 @@ retry: out_unlock_internal: xfs_rwunlock(bdp, locktype); - out_unlock_isem: - if (need_isem) + out_unlock_mutex: + if (need_i_mutex) mutex_unlock(&inode->i_mutex); out_nounlocks: return -error; diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index 8955720a2c6b..713e6a7505d0 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c @@ -62,18 +62,15 @@ xfs_read_xfsstats( while (j < xstats[i].endpoint) { val = 0; /* sum over all cpus */ - for (c = 0; c < NR_CPUS; c++) { - if (!cpu_possible(c)) continue; + for_each_cpu(c) val += *(((__u32*)&per_cpu(xfsstats, c) + j)); - } len += sprintf(buffer + len, " %u", val); j++; } buffer[len++] = '\n'; } /* extra precision counters */ - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) continue; + for_each_cpu(i) { xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index f22e426d9e42..8355faf8ffde 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -59,8 +59,8 @@ #include <linux/writeback.h> #include <linux/kthread.h> -STATIC struct quotactl_ops linvfs_qops; -STATIC struct super_operations linvfs_sops; +STATIC struct quotactl_ops xfs_quotactl_operations; +STATIC struct super_operations xfs_super_operations; STATIC kmem_zone_t *xfs_vnode_zone; STATIC kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; @@ -76,8 +76,6 @@ xfs_args_allocate( strncpy(args->fsname, sb->s_id, MAXNAMELEN); /* Copy the already-parsed mount(2) flags we're interested in */ - if (sb->s_flags & MS_NOATIME) - args->flags |= 
XFSMNT_NOATIME; if (sb->s_flags & MS_DIRSYNC) args->flags |= XFSMNT_DIRSYNC; if (sb->s_flags & MS_SYNCHRONOUS) @@ -129,21 +127,21 @@ xfs_set_inodeops( { switch (inode->i_mode & S_IFMT) { case S_IFREG: - inode->i_op = &linvfs_file_inode_operations; - inode->i_fop = &linvfs_file_operations; - inode->i_mapping->a_ops = &linvfs_aops; + inode->i_op = &xfs_inode_operations; + inode->i_fop = &xfs_file_operations; + inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: - inode->i_op = &linvfs_dir_inode_operations; - inode->i_fop = &linvfs_dir_operations; + inode->i_op = &xfs_dir_inode_operations; + inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: - inode->i_op = &linvfs_symlink_inode_operations; + inode->i_op = &xfs_symlink_inode_operations; if (inode->i_blocks) - inode->i_mapping->a_ops = &linvfs_aops; + inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: - inode->i_op = &linvfs_file_inode_operations; + inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } @@ -155,7 +153,7 @@ xfs_revalidate_inode( vnode_t *vp, xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); inode->i_mode = ip->i_d.di_mode; inode->i_nlink = ip->i_d.di_nlink; @@ -212,7 +210,7 @@ xfs_initialize_vnode( int unlock) { xfs_inode_t *ip = XFS_BHVTOI(inode_bhv); - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (!inode_bhv->bd_vobj) { vp->v_vfsp = bhvtovfs(bdp); @@ -230,7 +228,7 @@ xfs_initialize_vnode( if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); xfs_set_inodeops(inode); - + ip->i_flags &= ~XFS_INEW; barrier(); @@ -334,43 +332,42 @@ xfs_blkdev_issue_flush( } STATIC struct inode * -linvfs_alloc_inode( +xfs_fs_alloc_inode( struct super_block *sb) { vnode_t *vp; - vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP)); - if (!vp) + vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); + if (unlikely(!vp)) return NULL; - return LINVFS_GET_IP(vp); + return vn_to_inode(vp); } STATIC void -linvfs_destroy_inode( +xfs_fs_destroy_inode( struct inode *inode) { - kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode)); + kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); } STATIC void -linvfs_inode_init_once( - void *data, - kmem_cache_t *cachep, +xfs_fs_inode_init_once( + void *vnode, + kmem_zone_t *zonep, unsigned long flags) { - vnode_t *vp = (vnode_t *)data; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - inode_init_once(LINVFS_GET_IP(vp)); + SLAB_CTOR_CONSTRUCTOR) + inode_init_once(vn_to_inode((vnode_t *)vnode)); } STATIC int -linvfs_init_zones(void) +xfs_init_zones(void) { - xfs_vnode_zone = kmem_cache_create("xfs_vnode", - sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, - linvfs_inode_init_once, NULL); + xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t", + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | + KM_ZONE_SPREAD, + xfs_fs_inode_init_once); if (!xfs_vnode_zone) goto out; @@ -379,14 +376,12 @@ linvfs_init_zones(void) goto out_destroy_vnode_zone; xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, - mempool_alloc_slab, mempool_free_slab, - xfs_ioend_zone); + mempool_alloc_slab, mempool_free_slab, + xfs_ioend_zone); if (!xfs_ioend_pool) goto out_free_ioend_zone; - return 0; - out_free_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); out_destroy_vnode_zone: @@ -396,7 +391,7 @@ linvfs_init_zones(void) } STATIC void 
-linvfs_destroy_zones(void) +xfs_destroy_zones(void) { mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_vnode_zone); @@ -407,14 +402,14 @@ linvfs_destroy_zones(void) * Attempt to flush the inode, this will actually fail * if the inode is pinned, but we dirty the inode again * at the point when it is unpinned after a log write, - * since this is when the inode itself becomes flushable. + * since this is when the inode itself becomes flushable. */ STATIC int -linvfs_write_inode( +xfs_fs_write_inode( struct inode *inode, int sync) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0, flags = FLUSH_INODE; if (vp) { @@ -434,13 +429,13 @@ linvfs_write_inode( } STATIC void -linvfs_clear_inode( +xfs_fs_clear_inode( struct inode *inode) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error, cache; - vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); XFS_STATS_INC(vn_rele); XFS_STATS_INC(vn_remove); @@ -516,7 +511,7 @@ void xfs_flush_inode( xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -541,7 +536,7 @@ void xfs_flush_device( xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -550,7 +545,7 @@ xfs_flush_device( xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); } -#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR) +#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE) STATIC void vfs_sync_worker( vfs_t *vfsp, @@ -613,7 +608,7 @@ xfssyncd( } STATIC int -linvfs_start_syncd( +xfs_fs_start_syncd( vfs_t *vfsp) { vfsp->vfs_sync_work.w_syncer = vfs_sync_worker; @@ -625,20 +620,20 @@ linvfs_start_syncd( } STATIC void -linvfs_stop_syncd( +xfs_fs_stop_syncd( vfs_t *vfsp) { kthread_stop(vfsp->vfs_sync_task); } STATIC void -linvfs_put_super( +xfs_fs_put_super( struct super_block *sb) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; - linvfs_stop_syncd(vfsp); + xfs_fs_stop_syncd(vfsp); VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); if (!error) VFS_UNMOUNT(vfsp, 0, NULL, error); @@ -652,10 +647,10 @@ linvfs_put_super( } STATIC void -linvfs_write_super( +xfs_fs_write_super( struct super_block *sb) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; if (sb->s_flags & MS_RDONLY) { @@ -668,11 +663,11 @@ linvfs_write_super( } STATIC int -linvfs_sync_super( +xfs_fs_sync_super( struct super_block *sb, int wait) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; int flags = SYNC_FSDATA; @@ -707,11 +702,11 @@ linvfs_sync_super( } STATIC int -linvfs_statfs( +xfs_fs_statfs( struct super_block *sb, struct kstatfs *statp) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; VFS_STATVFS(vfsp, statp, NULL, error); @@ -719,12 +714,12 @@ linvfs_statfs( } STATIC int -linvfs_remount( +xfs_fs_remount( struct super_block *sb, int *flags, char *options) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); struct xfs_mount_args *args = xfs_args_allocate(sb); int error; @@ -736,18 +731,18 @@ linvfs_remount( } STATIC void -linvfs_freeze_fs( +xfs_fs_lockfs( struct super_block *sb) { - VFS_FREEZE(LINVFS_GET_VFS(sb)); + 
VFS_FREEZE(vfs_from_sb(sb)); } STATIC int -linvfs_show_options( +xfs_fs_show_options( struct seq_file *m, struct vfsmount *mnt) { - struct vfs *vfsp = LINVFS_GET_VFS(mnt->mnt_sb); + struct vfs *vfsp = vfs_from_sb(mnt->mnt_sb); int error; VFS_SHOWARGS(vfsp, m, error); @@ -755,11 +750,11 @@ linvfs_show_options( } STATIC int -linvfs_quotasync( +xfs_fs_quotasync( struct super_block *sb, int type) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error); @@ -767,11 +762,11 @@ linvfs_quotasync( } STATIC int -linvfs_getxstate( +xfs_fs_getxstate( struct super_block *sb, struct fs_quota_stat *fqs) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error); @@ -779,12 +774,12 @@ linvfs_getxstate( } STATIC int -linvfs_setxstate( +xfs_fs_setxstate( struct super_block *sb, unsigned int flags, int op) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error); @@ -792,13 +787,13 @@ linvfs_setxstate( } STATIC int -linvfs_getxquota( +xfs_fs_getxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error, getmode; getmode = (type == USRQUOTA) ? Q_XGETQUOTA : @@ -808,13 +803,13 @@ linvfs_getxquota( } STATIC int -linvfs_setxquota( +xfs_fs_setxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error, setmode; setmode = (type == USRQUOTA) ? Q_XSETQLIM : @@ -824,21 +819,17 @@ linvfs_setxquota( } STATIC int -linvfs_fill_super( +xfs_fs_fill_super( struct super_block *sb, void *data, int silent) { vnode_t *rootvp; - struct vfs *vfsp = vfs_allocate(); + struct vfs *vfsp = vfs_allocate(sb); struct xfs_mount_args *args = xfs_args_allocate(sb); struct kstatfs statvfs; int error, error2; - vfsp->vfs_super = sb; - LINVFS_SET_VFS(sb, vfsp); - if (sb->s_flags & MS_RDONLY) - vfsp->vfs_flag |= VFS_RDONLY; bhv_insert_all_vfsops(vfsp); VFS_PARSEARGS(vfsp, (char *)data, args, 0, error); @@ -849,10 +840,10 @@ linvfs_fill_super( sb_min_blocksize(sb, BBSIZE); #ifdef CONFIG_XFS_EXPORT - sb->s_export_op = &linvfs_export_ops; + sb->s_export_op = &xfs_export_operations; #endif - sb->s_qcop = &linvfs_qops; - sb->s_op = &linvfs_sops; + sb->s_qcop = &xfs_quotactl_operations; + sb->s_op = &xfs_super_operations; VFS_MOUNT(vfsp, args, NULL, error); if (error) { @@ -876,7 +867,7 @@ linvfs_fill_super( if (error) goto fail_unmount; - sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp)); + sb->s_root = d_alloc_root(vn_to_inode(rootvp)); if (!sb->s_root) { error = ENOMEM; goto fail_vnrele; @@ -885,7 +876,7 @@ linvfs_fill_super( error = EINVAL; goto fail_vnrele; } - if ((error = linvfs_start_syncd(vfsp))) + if ((error = xfs_fs_start_syncd(vfsp))) goto fail_vnrele; vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address); @@ -910,41 +901,41 @@ fail_vfsop: } STATIC struct super_block * -linvfs_get_sb( +xfs_fs_get_sb( struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super); -} - -STATIC struct super_operations linvfs_sops = { - .alloc_inode = linvfs_alloc_inode, - .destroy_inode = linvfs_destroy_inode, - .write_inode = linvfs_write_inode, - .clear_inode = 
linvfs_clear_inode, - .put_super = linvfs_put_super, - .write_super = linvfs_write_super, - .sync_fs = linvfs_sync_super, - .write_super_lockfs = linvfs_freeze_fs, - .statfs = linvfs_statfs, - .remount_fs = linvfs_remount, - .show_options = linvfs_show_options, + return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); +} + +STATIC struct super_operations xfs_super_operations = { + .alloc_inode = xfs_fs_alloc_inode, + .destroy_inode = xfs_fs_destroy_inode, + .write_inode = xfs_fs_write_inode, + .clear_inode = xfs_fs_clear_inode, + .put_super = xfs_fs_put_super, + .write_super = xfs_fs_write_super, + .sync_fs = xfs_fs_sync_super, + .write_super_lockfs = xfs_fs_lockfs, + .statfs = xfs_fs_statfs, + .remount_fs = xfs_fs_remount, + .show_options = xfs_fs_show_options, }; -STATIC struct quotactl_ops linvfs_qops = { - .quota_sync = linvfs_quotasync, - .get_xstate = linvfs_getxstate, - .set_xstate = linvfs_setxstate, - .get_xquota = linvfs_getxquota, - .set_xquota = linvfs_setxquota, +STATIC struct quotactl_ops xfs_quotactl_operations = { + .quota_sync = xfs_fs_quotasync, + .get_xstate = xfs_fs_getxstate, + .set_xstate = xfs_fs_setxstate, + .get_xquota = xfs_fs_getxquota, + .set_xquota = xfs_fs_setxquota, }; STATIC struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", - .get_sb = linvfs_get_sb, + .get_sb = xfs_fs_get_sb, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; @@ -965,7 +956,7 @@ init_xfs_fs( void ) ktrace_init(64); - error = linvfs_init_zones(); + error = xfs_init_zones(); if (error < 0) goto undo_zones; @@ -981,14 +972,13 @@ init_xfs_fs( void ) error = register_filesystem(&xfs_fs_type); if (error) goto undo_register; - XFS_DM_INIT(&xfs_fs_type); return 0; undo_register: xfs_buf_terminate(); undo_buffers: - linvfs_destroy_zones(); + xfs_destroy_zones(); undo_zones: return error; @@ -998,11 +988,10 @@ STATIC void __exit exit_xfs_fs( void ) { vfs_exitquota(); - XFS_DM_EXIT(&xfs_fs_type); unregister_filesystem(&xfs_fs_type); xfs_cleanup(); xfs_buf_terminate(); - linvfs_destroy_zones(); + xfs_destroy_zones(); ktrace_uninit(); } diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index df59408dca06..376b96cb513a 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -98,11 +98,6 @@ extern void xfs_qm_exit(void); XFS_DMAPI_STRING \ XFS_DBG_STRING /* DBG must be last */ -#define LINVFS_GET_VFS(s) \ - (vfs_t *)((s)->s_fs_info) -#define LINVFS_SET_VFS(s, vfsp) \ - ((s)->s_fs_info = vfsp) - struct xfs_inode; struct xfs_mount; struct xfs_buftarg; @@ -120,6 +115,6 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *, extern void xfs_blkdev_put(struct block_device *); extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); -extern struct export_operations linvfs_export_ops; +extern struct export_operations xfs_export_operations; #endif /* __XFS_SUPER_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index a02564972420..7079cc837210 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler( if (!ret && write && *valp) { printk("XFS Clearing xfsstats\n"); - for (c = 0; c < NR_CPUS; c++) { - if (!cpu_possible(c)) continue; + for_each_cpu(c) { preempt_disable(); /* save vn_active, it's a universal truth! 
*/ vn_active = per_cpu(xfsstats, c).vn_active; diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c index c855d62e5344..6f7c9f7a8624 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.c +++ b/fs/xfs/linux-2.6/xfs_vfs.c @@ -227,7 +227,8 @@ vfs_freeze( } vfs_t * -vfs_allocate( void ) +vfs_allocate( + struct super_block *sb) { struct vfs *vfsp; @@ -236,9 +237,23 @@ vfs_allocate( void ) INIT_LIST_HEAD(&vfsp->vfs_sync_list); spin_lock_init(&vfsp->vfs_sync_lock); init_waitqueue_head(&vfsp->vfs_wait_single_sync_task); + + vfsp->vfs_super = sb; + sb->s_fs_info = vfsp; + + if (sb->s_flags & MS_RDONLY) + vfsp->vfs_flag |= VFS_RDONLY; + return vfsp; } +vfs_t * +vfs_from_sb( + struct super_block *sb) +{ + return (vfs_t *)sb->s_fs_info; +} + void vfs_deallocate( struct vfs *vfsp) @@ -295,7 +310,7 @@ bhv_remove_all_vfsops( bhv_remove_vfsops(vfsp, VFS_POSITION_DM); if (!freebase) return; - mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops)); + mp = XFS_VFSTOM(vfsp); VFS_REMOVEBHV(vfsp, &mp->m_bhv); xfs_mount_free(mp, 0); } diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 57caf9eddee0..8fed356db055 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h @@ -193,7 +193,8 @@ typedef struct bhv_vfsops { #define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o)) #define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL ) -extern vfs_t *vfs_allocate(void); +extern vfs_t *vfs_allocate(struct super_block *); +extern vfs_t *vfs_from_sb(struct super_block *); extern void vfs_deallocate(vfs_t *); extern void vfs_insertops(vfs_t *, bhv_vfsops_t *); extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *); diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index 260dd8415dd7..d27c25b27ccd 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -58,7 +58,7 @@ struct vnode * vn_initialize( struct inode *inode) { - struct vnode *vp = LINVFS_GET_VP(inode); + struct vnode *vp = vn_from_inode(inode); XFS_STATS_INC(vn_active); XFS_STATS_INC(vn_alloc); @@ -83,7 +83,7 @@ vn_initialize( vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); #endif /* XFS_VNODE_TRACE */ - vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); + vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address); return vp; } @@ -97,7 +97,7 @@ vn_revalidate_core( struct vnode *vp, vattr_t *vap) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); inode->i_mode = vap->va_mode; inode->i_nlink = vap->va_nlink; @@ -129,24 +129,31 @@ vn_revalidate_core( * Revalidate the Linux inode from the vnode. */ int -vn_revalidate( - struct vnode *vp) +__vn_revalidate( + struct vnode *vp, + struct vattr *vattr) { - vattr_t va; int error; - vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); - ASSERT(vp->v_fbhv != NULL); - - va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (!error) { - vn_revalidate_core(vp, &va); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (likely(!error)) { + vn_revalidate_core(vp, vattr); VUNMODIFY(vp); } return -error; } +int +vn_revalidate( + struct vnode *vp) +{ + vattr_t vattr; + + return __vn_revalidate(vp, &vattr); +} + /* * Add a reference to a referenced vnode. 
*/ @@ -159,7 +166,7 @@ vn_hold( XFS_STATS_INC(vn_hold); VN_LOCK(vp); - inode = igrab(LINVFS_GET_IP(vp)); + inode = igrab(vn_to_inode(vp)); ASSERT(inode); VN_UNLOCK(vp, 0); diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 0fe2419461d6..06f5845e9568 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -116,8 +116,14 @@ typedef enum { /* * Vnode to Linux inode mapping. */ -#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode)) -#define LINVFS_GET_IP(vp) (&(vp)->v_inode) +static inline struct vnode *vn_from_inode(struct inode *inode) +{ + return (vnode_t *)list_entry(inode, vnode_t, v_inode); +} +static inline struct inode *vn_to_inode(struct vnode *vnode) +{ + return &vnode->v_inode; +} /* * Vnode flags. @@ -490,6 +496,7 @@ typedef struct vnode_map { (vmap).v_ino = (vp)->v_inode.i_ino; } extern int vn_revalidate(struct vnode *); +extern int __vn_revalidate(struct vnode *, vattr_t *); extern void vn_revalidate_core(struct vnode *, vattr_t *); extern void vn_iowait(struct vnode *vp); @@ -497,7 +504,7 @@ extern void vn_iowake(struct vnode *vp); static inline int vn_count(struct vnode *vp) { - return atomic_read(&LINVFS_GET_IP(vp)->i_count); + return atomic_read(&vn_to_inode(vp)->i_count); } /* @@ -511,16 +518,16 @@ extern vnode_t *vn_hold(struct vnode *); vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address)) #define VN_RELE(vp) \ (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \ - iput(LINVFS_GET_IP(vp))) + iput(vn_to_inode(vp))) #else #define VN_HOLD(vp) ((void)vn_hold(vp)) -#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) +#define VN_RELE(vp) (iput(vn_to_inode(vp))) #endif static inline struct vnode *vn_grab(struct vnode *vp) { - struct inode *inode = igrab(LINVFS_GET_IP(vp)); - return inode ? LINVFS_GET_VP(inode) : NULL; + struct inode *inode = igrab(vn_to_inode(vp)); + return inode ? vn_from_inode(inode) : NULL; } /* @@ -528,7 +535,7 @@ static inline struct vnode *vn_grab(struct vnode *vp) */ #define VNAME(dentry) ((char *) (dentry)->d_name.name) #define VNAMELEN(dentry) ((dentry)->d_name.len) -#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode)) +#define VNAME_TO_VNODE(dentry) (vn_from_inode((dentry)->d_inode)) /* * Vnode spinlock manipulation. @@ -557,12 +564,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag) */ static inline void vn_mark_bad(struct vnode *vp) { - make_bad_inode(LINVFS_GET_IP(vp)); + make_bad_inode(vn_to_inode(vp)); } static inline int VN_BAD(struct vnode *vp) { - return is_bad_inode(LINVFS_GET_IP(vp)); + return is_bad_inode(vn_to_inode(vp)); } /* @@ -587,9 +594,9 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt) /* * Some useful predicates. 
*/ -#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping) -#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages) -#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \ +#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) +#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ PAGECACHE_TAG_DIRTY) #define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED) #define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED) diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 2ec6b441849c..e4e5f05b841b 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -79,9 +79,11 @@ xfs_qm_dquot_logitem_format( logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; logvec->i_len = sizeof(xfs_dq_logformat_t); + XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT); logvec++; logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; logvec->i_len = sizeof(xfs_disk_dquot_t); + XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT); ASSERT(2 == logitem->qli_item.li_desc->lid_size); logitem->qli_format.qlf_size = 2; diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 7c0e39dc6189..1fb757ef3f41 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -1704,9 +1704,9 @@ xfs_qm_get_rtblks( xfs_qcnt_t *O_rtblks) { xfs_filblks_t rtblks; /* total rt blks */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extent entries */ - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; @@ -1717,10 +1717,11 @@ xfs_qm_get_rtblks( return error; } rtblks = 0; - nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - for (ep = base; ep < &base[nextents]; ep++) + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (idx = 0; idx < nextents; idx++) { + ep = xfs_iext_get_ext(ifp, idx); rtblks += xfs_bmbt_get_blockcount(ep); + } *O_rtblks = (xfs_qcnt_t)rtblks; return 0; } @@ -2788,9 +2789,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql) xfs_qm_dqdestroy(dqp); dqp = nextdqp; } - /* - * Don't bother about unlocking. 
- */ + mutex_unlock(&ql->qh_lock); mutex_destroy(&ql->qh_lock); ASSERT(ql->qh_nelems == 0); diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index 90402a1c3983..6838b36d95a9 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -374,7 +374,7 @@ xfs_qm_exit(void) vfs_bhv_clr_custom(&xfs_qmops); xfs_qm_cleanup_procfs(); if (qm_dqzone) - kmem_cache_destroy(qm_dqzone); + kmem_zone_destroy(qm_dqzone); if (qm_dqtrxzone) - kmem_cache_destroy(qm_dqtrxzone); + kmem_zone_destroy(qm_dqtrxzone); } diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c index 841aa4c15b8a..addf5a7ea06c 100644 --- a/fs/xfs/support/ktrace.c +++ b/fs/xfs/support/ktrace.c @@ -39,8 +39,8 @@ ktrace_init(int zentries) void ktrace_uninit(void) { - kmem_cache_destroy(ktrace_hdr_zone); - kmem_cache_destroy(ktrace_ent_zone); + kmem_zone_destroy(ktrace_hdr_zone); + kmem_zone_destroy(ktrace_ent_zone); } /* diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c index a3d565a67734..e157015c70ff 100644 --- a/fs/xfs/support/uuid.c +++ b/fs/xfs/support/uuid.c @@ -21,13 +21,6 @@ static mutex_t uuid_monitor; static int uuid_table_size; static uuid_t *uuid_table; -void -uuid_init(void) -{ - mutex_init(&uuid_monitor); -} - - /* IRIX interpretation of an uuid_t */ typedef struct { __be32 uu_timelow; @@ -50,7 +43,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2]) fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) | be16_to_cpu(uup->uu_timemid); - fsid[1] = be16_to_cpu(uup->uu_timelow); + fsid[1] = be32_to_cpu(uup->uu_timelow); } void @@ -139,3 +132,9 @@ uuid_table_remove(uuid_t *uuid) ASSERT(i < uuid_table_size); mutex_unlock(&uuid_monitor); } + +void +uuid_init(void) +{ + mutex_init(&uuid_monitor); +} diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index f9315bc960cb..538d0d65b04c 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h @@ -55,8 +55,8 @@ struct xfs_inode; extern struct kmem_zone *xfs_acl_zone; #define xfs_acl_zone_init(zone, name) \ - (zone) = kmem_zone_init(sizeof(xfs_acl_t), name) -#define xfs_acl_zone_destroy(zone) kmem_cache_destroy(zone) + (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) +#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *); extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index e5e91e9c7e89..093fac476bda 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -1127,8 +1127,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) return(error); ASSERT(bp != NULL); leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); xfs_da_brelse(NULL, bp); @@ -1541,8 +1540,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) XFS_ATTR_FORK); if (error) goto out; - ASSERT(INT_GET(((xfs_attr_leafblock_t *) - bp->data)->hdr.info.magic, ARCH_CONVERT) + ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *) + bp->data)->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { @@ -1763,7 +1762,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(error); if (bp) { node = bp->data; - switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) { + switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: xfs_attr_trace_l_cn("wrong blk", context, node); xfs_da_brelse(NULL, bp); @@ 
-1771,18 +1770,14 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) break; case XFS_ATTR_LEAF_MAGIC: leaf = bp->data; - if (cursor->hashval > - INT_GET(leaf->entries[ - INT_GET(leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT)) { + if (cursor->hashval > be32_to_cpu(leaf->entries[ + be16_to_cpu(leaf->hdr.count)-1].hashval)) { xfs_attr_trace_l_cl("wrong blk", context, leaf); xfs_da_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= - INT_GET(leaf->entries[0].hashval, - ARCH_CONVERT)) { + be32_to_cpu(leaf->entries[0].hashval)) { xfs_attr_trace_l_cl("maybe wrong blk", context, leaf); xfs_da_brelse(NULL, bp); @@ -1817,10 +1812,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } node = bp->data; - if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) + if (be16_to_cpu(node->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) break; - if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + if (unlikely(be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, @@ -1830,19 +1825,17 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } btree = node->btree; - for (i = 0; - i < INT_GET(node->hdr.count, ARCH_CONVERT); + for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) { if (cursor->hashval - <= INT_GET(btree->hashval, - ARCH_CONVERT)) { - cursor->blkno = INT_GET(btree->before, ARCH_CONVERT); + <= be32_to_cpu(btree->hashval)) { + cursor->blkno = be32_to_cpu(btree->before); xfs_attr_trace_l_cb("descending", context, btree); break; } } - if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(node->hdr.count)) { xfs_da_brelse(NULL, bp); return(0); } @@ -1858,7 +1851,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) */ for (;;) { leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", XFS_ERRLEVEL_LOW, @@ -1869,7 +1862,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) error = xfs_attr_leaf_list_int(bp, context); if (error || !leaf->hdr.info.forw) break; /* not really an error, buffer full or EOF */ - cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT); + cursor->blkno = be32_to_cpu(leaf->hdr.info.forw); xfs_da_brelse(NULL, bp); error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); @@ -2232,9 +2225,10 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), - (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + (__psunsigned_t)be16_to_cpu(node->hdr.count), + (__psunsigned_t)be32_to_cpu(node->btree[0].hashval), + (__psunsigned_t)be32_to_cpu(node->btree[ + be16_to_cpu(node->hdr.count)-1].hashval)); } /* @@ -2261,8 +2255,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT), + (__psunsigned_t)be32_to_cpu(btree->hashval), + (__psunsigned_t)be32_to_cpu(btree->before), (__psunsigned_t)NULL); } @@ -2290,9 +2284,10 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context 
*context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), - (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + (__psunsigned_t)be16_to_cpu(leaf->hdr.count), + (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval), + (__psunsigned_t)be32_to_cpu(leaf->entries[ + be16_to_cpu(leaf->hdr.count)-1].hashval)); } /* @@ -2522,7 +2517,7 @@ attr_user_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; @@ -2540,7 +2535,7 @@ attr_trusted_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index fe91eac4e2a7..717682747bd2 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -194,7 +194,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args) xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; hdr->count = 0; - INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr)); + hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -224,8 +224,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); - sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { #ifdef DEBUG if (sfe->namelen != args->namelen) continue; @@ -248,13 +247,13 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset); sfe->namelen = args->namelen; - INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen); + sfe->valuelen = args->valuelen; sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? 
XFS_ATTR_ROOT : 0); memcpy(sfe->nameval, args->name, args->namelen); memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); - INT_MOD(sf->hdr.count, ARCH_CONVERT, 1); - INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size); + sf->hdr.count++; + be16_add(&sf->hdr.totsize, size); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); xfs_sbversion_add_attr2(mp, args->trans); @@ -277,7 +276,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) base = sizeof(xfs_attr_sf_hdr_t); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - end = INT_GET(sf->hdr.count, ARCH_CONVERT); + end = sf->hdr.count; for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), base += size, i++) { size = XFS_ATTR_SF_ENTSIZE(sfe); @@ -300,11 +299,11 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) * Fix up the attribute fork data, covering the hole */ end = base + size; - totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + totsize = be16_to_cpu(sf->hdr.totsize); if (end != totsize) memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); - INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); - INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); + sf->hdr.count--; + be16_add(&sf->hdr.totsize, -size); /* * Fix up the start offset of the attribute fork @@ -360,7 +359,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -391,7 +390,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -404,14 +403,14 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ((sfe->flags & XFS_ATTR_ROOT) != 0)) continue; if (args->flags & ATTR_KERNOVAL) { - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + args->valuelen = sfe->valuelen; return(XFS_ERROR(EEXIST)); } - if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + if (args->valuelen < sfe->valuelen) { + args->valuelen = sfe->valuelen; return(XFS_ERROR(ERANGE)); } - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + args->valuelen = sfe->valuelen; memcpy(args->value, &sfe->nameval[args->namelen], args->valuelen); return(XFS_ERROR(EEXIST)); @@ -438,7 +437,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) dp = args->dp; ifp = dp->i_afp; sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; - size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + size = be16_to_cpu(sf->hdr.totsize); tmpbuffer = kmem_alloc(size, KM_SLEEP); ASSERT(tmpbuffer != NULL); memcpy(tmpbuffer, ifp->if_u1.if_data, size); @@ -481,11 +480,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) nargs.oknoent = 1; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0; i < sf->hdr.count; i++) { nargs.name = (char *)sfe->nameval; nargs.namelen = sfe->namelen; nargs.value = (char *)&sfe->nameval[nargs.namelen]; - nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + nargs.valuelen = sfe->valuelen; nargs.hashval = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); nargs.flags = (sfe->flags & 
XFS_ATTR_SECURE) ? ATTR_SECURE : @@ -514,11 +513,9 @@ xfs_attr_shortform_compare(const void *a, const void *b) sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; - if (INT_GET(sa->hash, ARCH_CONVERT) - < INT_GET(sb->hash, ARCH_CONVERT)) { + if (sa->hash < sb->hash) { return(-1); - } else if (INT_GET(sa->hash, ARCH_CONVERT) - > INT_GET(sb->hash, ARCH_CONVERT)) { + } else if (sa->hash > sb->hash) { return(1); } else { return(sa->entno - sb->entno); @@ -560,10 +557,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * If the buffer is large enough, do not bother with sorting. * Note the generous fudge factor of 16 overhead bytes per entry. */ - if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16) - < context->bufsize) { - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + if ((dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { attrnames_t *namesp; if (((context->flags & ATTR_SECURE) != 0) != @@ -584,14 +579,13 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) if (context->flags & ATTR_KERNOVAL) { ASSERT(context->flags & ATTR_KERNAMELS); context->count += namesp->attr_namelen + - INT_GET(sfe->namelen, ARCH_CONVERT) + 1; + sfe->namelen + 1; } else { if (xfs_attr_put_listent(context, namesp, (char *)sfe->nameval, (int)sfe->namelen, - (int)INT_GET(sfe->valuelen, - ARCH_CONVERT))) + (int)sfe->valuelen)) break; } sfe = XFS_ATTR_SF_NEXTENTRY(sfe); @@ -603,7 +597,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) /* * It didn't all fit, so we have to sort everything on hashval. */ - sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf); + sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); /* @@ -611,8 +605,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * the relevant info from only those that match into a buffer. */ nsbuf = 0; - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { @@ -636,8 +629,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) continue; } sbp->entno = i; - INT_SET(sbp->hash, ARCH_CONVERT, - xfs_da_hashname((char *)sfe->nameval, sfe->namelen)); + sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); sbp->name = (char *)sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ @@ -660,12 +652,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { - if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { + if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; - } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { + } else if (sbp->hash > cursor->hashval) { break; } } @@ -685,8 +677,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) ((sbp->flags & XFS_ATTR_ROOT) ? 
&attr_trusted : &attr_user); - if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { - cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); + if (cursor->hashval != sbp->hash) { + cursor->hashval = sbp->hash; cursor->offset = 0; } if (context->flags & ATTR_KERNOVAL) { @@ -696,7 +688,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } else { if (xfs_attr_put_listent(context, namesp, sbp->name, sbp->namelen, - INT_GET(sbp->valuelen, ARCH_CONVERT))) + sbp->valuelen)) break; } cursor->offset++; @@ -720,12 +712,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) int bytes, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!(entry->flags & XFS_ATTR_LOCAL)) @@ -733,11 +724,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); - if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) + if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); bytes += sizeof(struct xfs_attr_sf_entry)-1 + name_loc->namelen - + INT_GET(name_loc->valuelen, ARCH_CONVERT); + + be16_to_cpu(name_loc->valuelen); } if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && (bytes == sizeof(struct xfs_attr_sf_hdr))) @@ -766,8 +757,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -810,7 +800,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.trans = args->trans; nargs.oknoent = 1; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!entry->nameidx) @@ -820,8 +810,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.name = (char *)name_loc->nameval; nargs.namelen = name_loc->namelen; nargs.value = (char *)&name_loc->nameval[nargs.namelen]; - nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); - nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + nargs.valuelen = be16_to_cpu(name_loc->valuelen); + nargs.hashval = be32_to_cpu(entry->hashval); nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : ((entry->flags & XFS_ATTR_ROOT) ? 
ATTR_ROOT : 0); xfs_attr_shortform_add(&nargs, forkoff); @@ -875,13 +865,12 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args) goto out; node = bp1->data; leaf = bp2->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); /* both on-disk, don't endian-flip twice */ node->btree[0].hashval = - leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; - INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 1); + leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; + node->btree[0].before = cpu_to_be32(blkno); + node->hdr.count = cpu_to_be16(1); xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); error = 0; out: @@ -920,19 +909,16 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); - INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC); + hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) { - INT_SET(hdr->firstused, ARCH_CONVERT, + hdr->firstused = cpu_to_be16( XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); } - INT_SET(hdr->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_SET(hdr->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr->firstused, ARCH_CONVERT) - - INT_GET(hdr->freemap[0].base, - ARCH_CONVERT)); + hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) - + sizeof(xfs_attr_leaf_hdr_t)); xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); @@ -1004,10 +990,9 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) int tablesize, entsize, sum, tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); ASSERT((args->index >= 0) - && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + && (args->index <= be16_to_cpu(leaf->hdr.count))); hdr = &leaf->hdr; entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen, args->trans->t_mountp->m_sb.sb_blocksize, NULL); @@ -1016,26 +1001,25 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * Search through freemap for first-fit on new name length. 
* (may need to figure in size of entry struct too) */ - tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) + tablesize = (be16_to_cpu(hdr->count) + 1) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { - if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { - sum += INT_GET(map->size, ARCH_CONVERT); + if (tablesize > be16_to_cpu(hdr->firstused)) { + sum += be16_to_cpu(map->size); continue; } if (!map->size) continue; /* no space in this map */ tmp = entsize; - if (INT_GET(map->base, ARCH_CONVERT) - < INT_GET(hdr->firstused, ARCH_CONVERT)) + if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused)) tmp += sizeof(xfs_attr_leaf_entry_t); - if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + if (be16_to_cpu(map->size) >= tmp) { tmp = xfs_attr_leaf_add_work(bp, args, i); return(tmp); } - sum += INT_GET(map->size, ARCH_CONVERT); + sum += be16_to_cpu(map->size); } /* @@ -1056,7 +1040,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * After compaction, the block is guaranteed to have only one * free region, in freemap[0]. If it is not big enough, give up. */ - if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) + if (be16_to_cpu(hdr->freemap[0].size) < (entsize + sizeof(xfs_attr_leaf_entry_t))) return(XFS_ERROR(ENOSPC)); @@ -1079,45 +1063,42 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) int tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); - ASSERT((args->index >= 0) - && (args->index <= INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count))); /* * Force open some space in the entry array and fill it in. */ entry = &leaf->entries[args->index]; - if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) { - tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index; + if (args->index < be16_to_cpu(hdr->count)) { + tmp = be16_to_cpu(hdr->count) - args->index; tmp *= sizeof(xfs_attr_leaf_entry_t); memmove((char *)(entry+1), (char *)entry, tmp); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); } - INT_MOD(hdr->count, ARCH_CONVERT, 1); + be16_add(&hdr->count, 1); /* * Allocate space for the new string (at the end of the run). 
*/ map = &hdr->freemap[mapindex]; mp = args->trans->t_mountp; - ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0); - ASSERT(INT_GET(map->size, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); + ASSERT((be16_to_cpu(map->base) & 0x3) == 0); + ASSERT(be16_to_cpu(map->size) >= xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, NULL)); - ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0); - INT_MOD(map->size, ARCH_CONVERT, + ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); + ASSERT((be16_to_cpu(map->size) & 0x3) == 0); + be16_add(&map->size, -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, &tmp)); - INT_SET(entry->nameidx, ARCH_CONVERT, - INT_GET(map->base, ARCH_CONVERT) - + INT_GET(map->size, ARCH_CONVERT)); - INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + + be16_to_cpu(map->size)); + entry->hashval = cpu_to_be32(args->hashval); entry->flags = tmp ? XFS_ATTR_LOCAL : 0; entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0); @@ -1130,12 +1111,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) } xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); - ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT) - >= INT_GET((entry-1)->hashval, - ARCH_CONVERT))); - ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) || - (INT_GET(entry->hashval, ARCH_CONVERT) - <= (INT_GET((entry+1)->hashval, ARCH_CONVERT)))); + ASSERT((args->index == 0) || + (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval))); + ASSERT((args->index == be16_to_cpu(hdr->count)-1) || + (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval))); /* * Copy the attribute name and value into the new space. 
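The xfs_attr_leaf.c hunks above replace the INT_GET()/INT_SET(..., ARCH_CONVERT) accessors with fields declared as __be16/__be32 and converted via be16_to_cpu()/cpu_to_be16() at the point of use, so sparse can flag any missed byte-swap. A minimal user-space sketch of that pattern follows; the types and helpers are illustrative stand-ins, not the kernel code itself, with htons()/ntohs() substituting for the kernel's conversion routines:

    /* Stand-in types and helpers -- illustrative only, not the kernel's. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>          /* htons()/ntohs() as byte-swap stand-ins */

    typedef uint16_t __be16;        /* on-disk fields are annotated this way */

    static inline __be16 cpu_to_be16(uint16_t v)  { return htons(v); }
    static inline uint16_t be16_to_cpu(__be16 v)  { return ntohs(v); }

    /* In-place add on a big-endian field, as be16_add() does in the hunks. */
    static inline void be16_add(__be16 *p, int16_t d)
    {
            *p = cpu_to_be16(be16_to_cpu(*p) + d);
    }

    struct leaf_hdr {               /* simplified stand-in for the leaf header */
            __be16 count;           /* stored big-endian on disk */
            __be16 usedbytes;
    };

    int main(void)
    {
            struct leaf_hdr hdr = {
                    .count     = cpu_to_be16(3),
                    .usedbytes = cpu_to_be16(96),
            };

            be16_add(&hdr.count, 1);        /* one new attr entry */
            be16_add(&hdr.usedbytes, 24);   /* its name/value bytes */

            printf("count=%u usedbytes=%u\n",
                   be16_to_cpu(hdr.count), be16_to_cpu(hdr.usedbytes));
            return 0;
    }

be16_add() mirrors the in-place helper used throughout these hunks: read the field, adjust it in CPU byte order, and write it back big-endian, so the on-disk representation stays consistent regardless of host endianness.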
@@ -1149,10 +1128,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); name_loc->namelen = args->namelen; - INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen); + name_loc->valuelen = cpu_to_be16(args->valuelen); memcpy((char *)name_loc->nameval, args->name, args->namelen); memcpy((char *)&name_loc->nameval[args->namelen], args->value, - INT_GET(name_loc->valuelen, ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); name_rmt->namelen = args->namelen; @@ -1171,28 +1150,23 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) /* * Update the control info for this leaf node */ - if (INT_GET(entry->nameidx, ARCH_CONVERT) - < INT_GET(hdr->firstused, ARCH_CONVERT)) { + if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) { /* both on-disk, don't endian-flip twice */ hdr->firstused = entry->nameidx; } - ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) - >= ((INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(*entry))+sizeof(*hdr))); - tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) - * sizeof(xfs_attr_leaf_entry_t) + ASSERT(be16_to_cpu(hdr->firstused) >= + ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr))); + tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - if (INT_GET(map->base, ARCH_CONVERT) == tmp) { - INT_MOD(map->base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_entry_t)); - INT_MOD(map->size, ARCH_CONVERT, - -sizeof(xfs_attr_leaf_entry_t)); + if (be16_to_cpu(map->base) == tmp) { + be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t)); + be16_add(&map->size, + -((int)sizeof(xfs_attr_leaf_entry_t))); } } - INT_MOD(hdr->usedbytes, ARCH_CONVERT, - xfs_attr_leaf_entsize(leaf, args->index)); + be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); return(0); @@ -1223,28 +1197,25 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp) hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; hdr_d->info = hdr_s->info; /* struct copy */ - INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp)); + hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp)); /* handle truncation gracefully */ if (!hdr_d->firstused) { - INT_SET(hdr_d->firstused, ARCH_CONVERT, + hdr_d->firstused = cpu_to_be16( XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN); } hdr_d->usedbytes = 0; hdr_d->count = 0; hdr_d->holes = 0; - INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr_d->firstused, ARCH_CONVERT) - - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) - + sizeof(xfs_attr_leaf_hdr_t)); /* * Copy all entry's in the same (sorted) order, * but allocate name/value pairs packed and in sequence. 
*/ xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0, - (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp); - + be16_to_cpu(hdr_s->count), mp); xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); kmem_free(tmpbuffer, XFS_LBSIZE(mp)); @@ -1279,10 +1250,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); args = state->args; /* @@ -1319,22 +1288,21 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move any entries required from leaf to leaf: */ - if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + if (count < be16_to_cpu(hdr1->count)) { /* * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count = INT_GET(hdr1->count, ARCH_CONVERT) - count; - space = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen; + count = be16_to_cpu(hdr1->count) - count; + space = be16_to_cpu(hdr1->usedbytes) - totallen; space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf2 is the destination, compact it if it looks tight. */ - max = INT_GET(hdr2->firstused, ARCH_CONVERT) + max = be16_to_cpu(hdr2->firstused) - sizeof(xfs_attr_leaf_hdr_t); - max -= INT_GET(hdr2->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); + max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk2->bp); } @@ -1342,13 +1310,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move high entries from leaf1 to low end of leaf2. */ - xfs_attr_leaf_moveents(leaf1, - INT_GET(hdr1->count, ARCH_CONVERT)-count, + xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count, leaf2, 0, count, state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); - } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + } else if (count > be16_to_cpu(hdr1->count)) { /* * I assert that since all callers pass in an empty * second buffer, this code should never execute. @@ -1358,17 +1325,16 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count -= INT_GET(hdr1->count, ARCH_CONVERT); - space = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT); + count -= be16_to_cpu(hdr1->count); + space = totallen - be16_to_cpu(hdr1->usedbytes); space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf1 is the destination, compact it if it looks tight. */ - max = INT_GET(hdr1->firstused, ARCH_CONVERT) + max = be16_to_cpu(hdr1->firstused) - sizeof(xfs_attr_leaf_hdr_t); - max -= INT_GET(hdr1->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); + max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk1->bp); } @@ -1377,8 +1343,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move low entries from leaf2 to high end of leaf1. 
*/ xfs_attr_leaf_moveents(leaf2, 0, leaf1, - (int)INT_GET(hdr1->count, ARCH_CONVERT), count, - state->mp); + be16_to_cpu(hdr1->count), count, state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); @@ -1387,12 +1352,10 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Copy out last hashval in each block for B-tree code. */ - blk1->hashval = - INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT); - blk2->hashval = - INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + blk1->hashval = be32_to_cpu( + leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval); + blk2->hashval = be32_to_cpu( + leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval); /* * Adjust the expected index for insertion. @@ -1406,13 +1369,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * inserting. The index/blkno fields refer to the "old" entry, * while the index2/blkno2 fields refer to the "new" entry. */ - if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + if (blk1->index > be16_to_cpu(leaf1->hdr.count)) { ASSERT(state->inleaf == 0); - blk2->index = blk1->index - - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; - } else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + } else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) { if (state->inleaf) { args->index = blk1->index; args->blkno = blk1->blkno; @@ -1420,7 +1382,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, args->blkno2 = blk2->blkno; } else { blk2->index = blk1->index - - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + - be16_to_cpu(leaf1->hdr.count); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; } @@ -1464,15 +1426,14 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, * Examine entries until we reduce the absolute difference in * byte usage between the two blocks to a minimum. */ - max = INT_GET(hdr1->count, ARCH_CONVERT) - + INT_GET(hdr2->count, ARCH_CONVERT); + max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count); half = (max+1) * sizeof(*entry); - half += INT_GET(hdr1->usedbytes, ARCH_CONVERT) - + INT_GET(hdr2->usedbytes, ARCH_CONVERT) - + xfs_attr_leaf_newentsize( - state->args->namelen, - state->args->valuelen, - state->blocksize, NULL); + half += be16_to_cpu(hdr1->usedbytes) + + be16_to_cpu(hdr2->usedbytes) + + xfs_attr_leaf_newentsize( + state->args->namelen, + state->args->valuelen, + state->blocksize, NULL); half /= 2; lastdelta = state->blocksize; entry = &leaf1->entries[0]; @@ -1498,7 +1459,7 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, /* * Wrap around into the second block if necessary. 
*/ - if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { + if (count == be16_to_cpu(hdr1->count)) { leaf1 = leaf2; entry = &leaf1->entries[0]; index = 0; @@ -1566,12 +1527,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); leaf = (xfs_attr_leafblock_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count); bytes = sizeof(xfs_attr_leaf_hdr_t) + count * sizeof(xfs_attr_leaf_entry_t) + - INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + be16_to_cpu(leaf->hdr.usedbytes); if (bytes > (state->blocksize >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); @@ -1588,7 +1549,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1610,13 +1571,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * to shrink an attribute list over time. */ /* start with smaller blk num */ - forward = (INT_GET(info->forw, ARCH_CONVERT) - < INT_GET(info->back, ARCH_CONVERT)); + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1626,14 +1586,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) ASSERT(bp != NULL); leaf = (xfs_attr_leafblock_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count); bytes = state->blocksize - (state->blocksize>>2); - bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + bytes -= be16_to_cpu(leaf->hdr.usedbytes); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - count += INT_GET(leaf->hdr.count, ARCH_CONVERT); - bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + count += be16_to_cpu(leaf->hdr.count); + bytes -= be16_to_cpu(leaf->hdr.usedbytes); bytes -= count * sizeof(xfs_attr_leaf_entry_t); bytes -= sizeof(xfs_attr_leaf_hdr_t); xfs_da_brelse(state->args->trans, bp); @@ -1685,21 +1644,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_mount_t *mp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; mp = args->trans->t_mountp; - ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) - && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((be16_to_cpu(hdr->count) > 0) + && (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8))); ASSERT((args->index >= 0) - && (args->index < INT_GET(hdr->count, ARCH_CONVERT))); - ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) - >= ((INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(*entry))+sizeof(*hdr))); + && (args->index < be16_to_cpu(hdr->count))); + ASSERT(be16_to_cpu(hdr->firstused) >= + ((be16_to_cpu(hdr->count) * sizeof(*entry)) + 
sizeof(*hdr))); entry = &leaf->entries[args->index]; - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - >= INT_GET(hdr->firstused, ARCH_CONVERT)); - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused)); + ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); /* * Scan through free region table: @@ -1707,33 +1663,30 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * find smallest free region in case we need to replace it, * adjust any map that borders the entry table, */ - tablesize = INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t) + tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; - tmp = INT_GET(map->size, ARCH_CONVERT); + tmp = be16_to_cpu(map->size); before = after = -1; smallest = XFS_ATTR_LEAF_MAPSIZE - 1; entsize = xfs_attr_leaf_entsize(leaf, args->index); for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); - if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { - INT_MOD(map->base, ARCH_CONVERT, - -sizeof(xfs_attr_leaf_entry_t)); - INT_MOD(map->size, ARCH_CONVERT, - sizeof(xfs_attr_leaf_entry_t)); + ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); + ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); + if (be16_to_cpu(map->base) == tablesize) { + be16_add(&map->base, + -((int)sizeof(xfs_attr_leaf_entry_t))); + be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t)); } - if ((INT_GET(map->base, ARCH_CONVERT) - + INT_GET(map->size, ARCH_CONVERT)) - == INT_GET(entry->nameidx, ARCH_CONVERT)) { + if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) + == be16_to_cpu(entry->nameidx)) { before = i; - } else if (INT_GET(map->base, ARCH_CONVERT) - == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + } else if (be16_to_cpu(map->base) + == (be16_to_cpu(entry->nameidx) + entsize)) { after = i; - } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { - tmp = INT_GET(map->size, ARCH_CONVERT); + } else if (be16_to_cpu(map->size) < tmp) { + tmp = be16_to_cpu(map->size); smallest = i; } } @@ -1745,38 +1698,35 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if ((before >= 0) || (after >= 0)) { if ((before >= 0) && (after >= 0)) { map = &hdr->freemap[before]; - INT_MOD(map->size, ARCH_CONVERT, entsize); - INT_MOD(map->size, ARCH_CONVERT, - INT_GET(hdr->freemap[after].size, - ARCH_CONVERT)); + be16_add(&map->size, entsize); + be16_add(&map->size, + be16_to_cpu(hdr->freemap[after].size)); hdr->freemap[after].base = 0; hdr->freemap[after].size = 0; } else if (before >= 0) { map = &hdr->freemap[before]; - INT_MOD(map->size, ARCH_CONVERT, entsize); + be16_add(&map->size, entsize); } else { map = &hdr->freemap[after]; /* both on-disk, don't endian flip twice */ map->base = entry->nameidx; - INT_MOD(map->size, ARCH_CONVERT, entsize); + be16_add(&map->size, entsize); } } else { /* * Replace smallest region (if it is smaller than free'd entry) */ map = &hdr->freemap[smallest]; - if (INT_GET(map->size, ARCH_CONVERT) < entsize) { - INT_SET(map->base, ARCH_CONVERT, - INT_GET(entry->nameidx, ARCH_CONVERT)); - INT_SET(map->size, ARCH_CONVERT, entsize); + if (be16_to_cpu(map->size) < entsize) { + map->base = cpu_to_be16(be16_to_cpu(entry->nameidx)); + map->size = cpu_to_be16(entsize); } } /* * Did we remove the first entry? 
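/*
 * Editor's note, condensing the freemap bookkeeping above: the freed
 * name/value bytes are merged into an adjacent free region when one
 * exists, and otherwise replace the smallest tracked region if they
 * are bigger than it. Restated (illustration; mirrors the code above,
 * with the same "both on-disk" shortcut for the base copy):
 */
	if (before >= 0 && after >= 0) {	/* freed span bridges two regions */
		be16_add(&hdr->freemap[before].size,
			 entsize + be16_to_cpu(hdr->freemap[after].size));
		hdr->freemap[after].base = 0;
		hdr->freemap[after].size = 0;
	} else if (before >= 0) {		/* grows a region upward */
		be16_add(&hdr->freemap[before].size, entsize);
	} else if (after >= 0) {		/* grows a region downward */
		hdr->freemap[after].base = entry->nameidx;	/* both BE */
		be16_add(&hdr->freemap[after].size, entsize);
	} else if (entsize > be16_to_cpu(hdr->freemap[smallest].size)) {
		hdr->freemap[smallest].base = entry->nameidx;	/* both BE */
		hdr->freemap[smallest].size = cpu_to_be16(entsize);
	}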
*/ - if (INT_GET(entry->nameidx, ARCH_CONVERT) - == INT_GET(hdr->firstused, ARCH_CONVERT)) + if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused)) smallest = 1; else smallest = 0; @@ -1785,18 +1735,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * Compress the remaining entries and zero out the removed stuff. */ memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); - INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize); + be16_add(&hdr->usedbytes, -entsize); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), entsize)); - tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index) + tmp = (be16_to_cpu(hdr->count) - args->index) * sizeof(xfs_attr_leaf_entry_t); memmove((char *)entry, (char *)(entry+1), tmp); - INT_MOD(hdr->count, ARCH_CONVERT, -1); + be16_add(&hdr->count, -1); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); - entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + entry = &leaf->entries[be16_to_cpu(hdr->count)]; memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); /* @@ -1808,18 +1758,17 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if (smallest) { tmp = XFS_LBSIZE(mp); entry = &leaf->entries[0]; - for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; - i >= 0; entry++, i--) { - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - >= INT_GET(hdr->firstused, ARCH_CONVERT)); - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - < XFS_LBSIZE(mp)); - if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) - tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) { + ASSERT(be16_to_cpu(entry->nameidx) >= + be16_to_cpu(hdr->firstused)); + ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); + + if (be16_to_cpu(entry->nameidx) < tmp) + tmp = be16_to_cpu(entry->nameidx); } - INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + hdr->firstused = cpu_to_be16(tmp); if (!hdr->firstused) { - INT_SET(hdr->firstused, ARCH_CONVERT, + hdr->firstused = cpu_to_be16( tmp - XFS_ATTR_LEAF_NAME_ALIGN); } } else { @@ -1833,9 +1782,8 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * "join" the leaf with a sibling if so. */ tmp = sizeof(xfs_attr_leaf_hdr_t); - tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); - tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t); + tmp += be16_to_cpu(leaf->hdr.usedbytes); return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */ } @@ -1859,20 +1807,16 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; /* * Save last hashval from dying block for later Btree fixup. */ - drop_blk->hashval = - INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu( + drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval); /* * Check if we need a temp buffer, or can we do it in place. 
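/*
 * Editor's note (the 37% figure is an assumption based on xfs_mount.c
 * computing m_attr_magicpct as (sb_blocksize * 37) / 100): the "join"
 * test above compares header + entry table + packed name/value bytes
 * against that threshold, e.g. for a 4k leaf block:
 */
	int magicpct = (4096 * 37) / 100;	/* = 1515 bytes, "< 37% full" */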
@@ -1886,12 +1830,11 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, */ if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0, - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + be16_to_cpu(drop_hdr->count), mp); } else { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, - INT_GET(save_hdr->count, ARCH_CONVERT), - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(save_hdr->count), + be16_to_cpu(drop_hdr->count), mp); } } else { /* @@ -1905,28 +1848,24 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, tmp_hdr = &tmp_leaf->hdr; tmp_hdr->info = save_hdr->info; /* struct copy */ tmp_hdr->count = 0; - INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + tmp_hdr->firstused = cpu_to_be16(state->blocksize); if (!tmp_hdr->firstused) { - INT_SET(tmp_hdr->firstused, ARCH_CONVERT, + tmp_hdr->firstused = cpu_to_be16( state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN); } tmp_hdr->usedbytes = 0; if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(drop_hdr->count), mp); xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, - INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), - (int)INT_GET(save_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(tmp_leaf->hdr.count), + be16_to_cpu(save_hdr->count), mp); } else { xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0, - (int)INT_GET(save_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(save_hdr->count), mp); xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, - INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(tmp_leaf->hdr.count), + be16_to_cpu(drop_hdr->count), mp); } memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); kmem_free(tmpbuffer, state->blocksize); @@ -1938,10 +1877,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Copy out last hashval in each block for B-tree code. */ - save_blk->hashval = - INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT); + save_blk->hashval = be32_to_cpu( + save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval); } /*======================================================================== @@ -1972,48 +1909,45 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* * Binary search. 
(note: small blocks will skip this loop) */ hashval = args->hashval; - probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + probe = span = be16_to_cpu(leaf->hdr.count) / 2; for (entry = &leaf->entries[probe]; span > 4; entry = &leaf->entries[probe]) { span /= 2; - if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + if (be32_to_cpu(entry->hashval) < hashval) probe += span; - else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + else if (be32_to_cpu(entry->hashval) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (!leaf->hdr.count - || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); - ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) - == hashval)); + || (probe < be16_to_cpu(leaf->hdr.count)))); + ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval)); /* * Since we may have duplicate hashval's, find the first matching * hashval in the leaf. */ - while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) - >= hashval)) { + while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) { entry--; probe--; } - while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + while ((probe < be16_to_cpu(leaf->hdr.count)) && + (be32_to_cpu(entry->hashval) < hashval)) { entry++; probe++; } - if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) - || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + if ((probe == be16_to_cpu(leaf->hdr.count)) || + (be32_to_cpu(entry->hashval) != hashval)) { args->index = probe; return(XFS_ERROR(ENOATTR)); } @@ -2021,8 +1955,8 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) /* * Duplicate keys may be present, so search all of them for a match. */ - for ( ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval); + for ( ; (probe < be16_to_cpu(leaf->hdr.count)) && + (be32_to_cpu(entry->hashval) == hashval); entry++, probe++) { /* * GROT: Add code to remove incomplete entries. 
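/*
 * Editor's sketch of the lookup strategy above, as standalone C over a
 * sorted array of (already cpu-endian) hash values; names here are
 * hypothetical. Halve the span until it is small, then walk linearly
 * to the *first* entry with a matching hash, since duplicate hashvals
 * are legal and only a name comparison disambiguates.
 */
#include <stdint.h>

static int first_match(const uint32_t *hash, int count, uint32_t want)
{
	int probe = count / 2, span = count / 2;

	while (span > 4) {			/* coarse binary search */
		span /= 2;
		if (hash[probe] < want)
			probe += span;
		else if (hash[probe] > want)
			probe -= span;
		else
			break;
	}
	while (probe > 0 && hash[probe] >= want)	/* back up to first dup */
		probe--;
	while (probe < count && hash[probe] < want)	/* skip smaller keys */
		probe++;
	return (probe < count && hash[probe] == want) ? probe : -1;
}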
@@ -2064,11 +1998,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) ((entry->flags & XFS_ATTR_ROOT) != 0)) continue; args->index = probe; - args->rmtblkno - = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkno = be32_to_cpu(name_rmt->valueblk); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, - INT_GET(name_rmt->valuelen, - ARCH_CONVERT)); + be32_to_cpu(name_rmt->valuelen)); return(XFS_ERROR(EEXIST)); } } @@ -2090,18 +2022,17 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_attr_leaf_name_remote_t *name_rmt; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); - ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT))); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); entry = &leaf->entries[args->index]; if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); ASSERT(name_loc->namelen == args->namelen); ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); - valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + valuelen = be16_to_cpu(name_loc->valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; return(0); @@ -2116,8 +2047,8 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); ASSERT(name_rmt->namelen == args->namelen); ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); - valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT); - args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + valuelen = be32_to_cpu(name_rmt->valuelen); + args->rmtblkno = be32_to_cpu(name_rmt->valueblk); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; @@ -2159,32 +2090,29 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
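/*
 * Editor's note: "local" attribute values live inside the leaf block
 * and carry a 16-bit length; "remote" values live in separate blocks
 * and carry a 32-bit length plus a starting block number. That is why
 * the conversions above mix be16_to_cpu(name_loc->valuelen) with
 * be32_to_cpu(name_rmt->valuelen) / be32_to_cpu(name_rmt->valueblk).
 * Condensed shape of the retrieval (a sketch mirroring
 * xfs_attr_leaf_getvalue; range checks elided):
 */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
		valuelen = be16_to_cpu(name_loc->valuelen);	/* 16-bit */
		memcpy(args->value, &name_loc->nameval[args->namelen],
		       valuelen);				/* inline copy */
	} else {
		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
		valuelen = be32_to_cpu(name_rmt->valuelen);	/* 32-bit */
		args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
		args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
	}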
*/ - ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; - ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) - && (INT_GET(hdr_s->count, ARCH_CONVERT) - < (XFS_LBSIZE(mp)/8))); - ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= - ((INT_GET(hdr_s->count, ARCH_CONVERT) + ASSERT((be16_to_cpu(hdr_s->count) > 0) && + (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8))); + ASSERT(be16_to_cpu(hdr_s->firstused) >= + ((be16_to_cpu(hdr_s->count) * sizeof(*entry_s))+sizeof(*hdr_s))); - ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); - ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= - ((INT_GET(hdr_d->count, ARCH_CONVERT) + ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8)); + ASSERT(be16_to_cpu(hdr_d->firstused) >= + ((be16_to_cpu(hdr_d->count) * sizeof(*entry_d))+sizeof(*hdr_d))); - ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); - ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); - ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_s < be16_to_cpu(hdr_s->count)); + ASSERT(start_d <= be16_to_cpu(hdr_d->count)); + ASSERT(count <= be16_to_cpu(hdr_s->count)); /* * Move the entries in the destination leaf up to make a hole? */ - if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) { - tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d; + if (start_d < be16_to_cpu(hdr_d->count)) { + tmp = be16_to_cpu(hdr_d->count) - start_d; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_d->entries[start_d]; entry_d = &leaf_d->entries[start_d + count]; @@ -2199,8 +2127,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, entry_d = &leaf_d->entries[start_d]; desti = start_d; for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) { - ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) - >= INT_GET(hdr_s->firstused, ARCH_CONVERT)); + ASSERT(be16_to_cpu(entry_s->nameidx) + >= be16_to_cpu(hdr_s->firstused)); tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i); #ifdef GROT /* @@ -2210,35 +2138,35 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, */ if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? 
*/ memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); - INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + be16_add(&hdr_s->usedbytes, -tmp); + be16_add(&hdr_s->count, -1); entry_d--; /* to compensate for ++ in loop hdr */ desti--; if ((start_s + i) < offset) result++; /* insertion index adjustment */ } else { #endif /* GROT */ - INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp); + be16_add(&hdr_d->firstused, -tmp); /* both on-disk, don't endian flip twice */ entry_d->hashval = entry_s->hashval; /* both on-disk, don't endian flip twice */ entry_d->nameidx = hdr_d->firstused; entry_d->flags = entry_s->flags; - ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp + ASSERT(be16_to_cpu(entry_d->nameidx) + tmp <= XFS_LBSIZE(mp)); memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti), XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp); - ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp + ASSERT(be16_to_cpu(entry_s->nameidx) + tmp <= XFS_LBSIZE(mp)); memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); - INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp); - INT_MOD(hdr_s->count, ARCH_CONVERT, -1); - INT_MOD(hdr_d->count, ARCH_CONVERT, 1); - tmp = INT_GET(hdr_d->count, ARCH_CONVERT) + be16_add(&hdr_s->usedbytes, -tmp); + be16_add(&hdr_d->usedbytes, tmp); + be16_add(&hdr_s->count, -1); + be16_add(&hdr_d->count, 1); + tmp = be16_to_cpu(hdr_d->count) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); - ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp); + ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp); #ifdef GROT } #endif /* GROT */ @@ -2247,7 +2175,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Zero out the entries we just copied. */ - if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) { + if (start_s == be16_to_cpu(hdr_s->count)) { tmp = count * sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s]; ASSERT(((char *)entry_s + tmp) <= @@ -2258,15 +2186,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, * Move the remaining entries down to fill the hole, * then zero the entries at the top. 
*/ - tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; + tmp = be16_to_cpu(hdr_s->count) - count; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s + count]; entry_d = &leaf_s->entries[start_s]; memmove((char *)entry_d, (char *)entry_s, tmp); tmp = count * sizeof(xfs_attr_leaf_entry_t); - entry_s = &leaf_s->entries[INT_GET(hdr_s->count, - ARCH_CONVERT)]; + entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)]; ASSERT(((char *)entry_s + tmp) <= ((char *)leaf_s + XFS_LBSIZE(mp))); memset((char *)entry_s, 0, tmp); @@ -2275,14 +2202,11 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Fill in the freemap information */ - INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, - INT_GET(hdr_d->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t)); - INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr_d->firstused, ARCH_CONVERT) - - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * + sizeof(xfs_attr_leaf_entry_t)); + hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) + - be16_to_cpu(hdr_d->freemap[0].base)); hdr_d->freemap[1].base = 0; hdr_d->freemap[2].base = 0; hdr_d->freemap[1].size = 0; @@ -2301,18 +2225,16 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC) && - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC)); - if ( (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) - && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) - && ( (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < - INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) - || (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT) < - INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) { + ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) && + (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC)); + if ((be16_to_cpu(leaf1->hdr.count) > 0) && + (be16_to_cpu(leaf2->hdr.count) > 0) && + ((be32_to_cpu(leaf2->entries[0].hashval) < + be32_to_cpu(leaf1->entries[0].hashval)) || + (be32_to_cpu(leaf2->entries[ + be16_to_cpu(leaf2->hdr.count)-1].hashval) < + be32_to_cpu(leaf1->entries[ + be16_to_cpu(leaf1->hdr.count)-1].hashval)))) { return(1); } return(0); @@ -2327,14 +2249,12 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_attr_leafblock_t *leaf; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if (count) - *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) return(0); - return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval); } /* @@ -2348,13 +2268,11 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) xfs_attr_leaf_name_remote_t *name_rmt; int size; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); 
size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, - INT_GET(name_loc->valuelen, - ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); @@ -2412,22 +2330,20 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) */ if (context->resynch) { entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); - entry++, i++) { - if (INT_GET(entry->hashval, ARCH_CONVERT) - == cursor->hashval) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; - } else if (INT_GET(entry->hashval, ARCH_CONVERT) - > cursor->hashval) { + } else if (be32_to_cpu(entry->hashval) > + cursor->hashval) { context->dupcnt = 0; break; } } - if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(leaf->hdr.count)) { xfs_attr_trace_l_c("not found", context); return(0); } @@ -2441,12 +2357,12 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) * We have found our place, start copying out the new attributes. */ retval = 0; - for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + for ( ; (i < be16_to_cpu(leaf->hdr.count)) && (retval == 0); entry++, i++) { attrnames_t *namesp; - if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { - cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); + if (be32_to_cpu(entry->hashval) != cursor->hashval) { + cursor->hashval = be32_to_cpu(entry->hashval); cursor->offset = 0; } @@ -2475,8 +2391,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_loc->nameval, (int)name_loc->namelen, - (int)INT_GET(name_loc->valuelen, - ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); @@ -2488,8 +2403,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_rmt->name, (int)name_rmt->namelen, - (int)INT_GET(name_rmt->valuelen, - ARCH_CONVERT)); + be32_to_cpu(name_rmt->valuelen)); } } if (retval == 0) { @@ -2596,9 +2510,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); @@ -2613,7 +2526,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) namelen = name_rmt->namelen; name = (char *)name_rmt->name; } - ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); + ASSERT(be32_to_cpu(entry->hashval) == args->hashval); ASSERT(namelen == args->namelen); ASSERT(memcmp(name, args->name, namelen) == 0); #endif /* DEBUG */ @@ -2625,8 +2538,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); - INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); - INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + name_rmt->valueblk = cpu_to_be32(args->rmtblkno); + name_rmt->valuelen = 
cpu_to_be32(args->valuelen); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } @@ -2663,9 +2576,8 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2736,16 +2648,14 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) } leaf1 = bp1->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); ASSERT(args->index >= 0); entry1 = &leaf1->entries[ args->index ]; leaf2 = bp2->data; - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); ASSERT(args->index2 >= 0); entry2 = &leaf2->entries[ args->index2 ]; @@ -2768,7 +2678,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) namelen2 = name_rmt->namelen; name2 = (char *)name_rmt->name; } - ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); + ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval)); ASSERT(namelen1 == namelen2); ASSERT(memcmp(name1, name2, namelen1) == 0); #endif /* DEBUG */ @@ -2782,8 +2692,8 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); - INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); - INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + name_rmt->valueblk = cpu_to_be32(args->rmtblkno); + name_rmt->valuelen = cpu_to_be32(args->valuelen); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); } @@ -2842,9 +2752,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) * This is a depth-first traversal! */ info = bp->data; - if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, bp, 1); - } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, bp); } else { error = XFS_ERROR(EIO); @@ -2892,15 +2802,14 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, } node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) - == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ - count = INT_GET(node->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(node->hdr.count); if (!count) { xfs_da_brelse(*trans, bp); return(0); } - child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); + child_fsb = be32_to_cpu(node->btree[0].before); xfs_da_brelse(*trans, bp); /* no locks for later trans */ /* @@ -2927,12 +2836,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, * Invalidate the subtree, however we have to. 
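/*
 * Editor's sketch: teardown of the attribute btree is a depth-first
 * walk keyed on the block's magic number, now read via be16_to_cpu().
 * Shape of the dispatch (restated as a switch; cleanup paths elided):
 */
	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:		/* interior node: recurse */
		error = xfs_attr_node_inactive(trans, dp, child_bp, level + 1);
		break;
	case XFS_ATTR_LEAF_MAGIC:	/* leaf: free its remote values */
		error = xfs_attr_leaf_inactive(trans, dp, child_bp);
		break;
	default:			/* corrupt block */
		error = XFS_ERROR(EIO);
		break;
	}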
*/ info = child_bp->data; - if (INT_GET(info->magic, ARCH_CONVERT) - == XFS_DA_NODE_MAGIC) { + if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, child_bp, level+1); - } else if (INT_GET(info->magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC) { + } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, child_bp); } else { @@ -2962,7 +2869,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, &bp, XFS_ATTR_FORK); if (error) return(error); - child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); + child_fsb = be32_to_cpu(node->btree[i+1].before); xfs_da_brelse(*trans, bp); } /* @@ -2991,17 +2898,16 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) int error, count, size, tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); /* * Count the number of "remote" value extents. */ count = 0; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { - if ( INT_GET(entry->nameidx, ARCH_CONVERT) - && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be16_to_cpu(entry->nameidx) && + ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) count++; @@ -3027,17 +2933,14 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) */ lp = list; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { - if ( INT_GET(entry->nameidx, ARCH_CONVERT) - && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be16_to_cpu(entry->nameidx) && + ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) { - /* both on-disk, don't endian flip twice */ - lp->valueblk = name_rmt->valueblk; - INT_SET(lp->valuelen, ARCH_CONVERT, - XFS_B_TO_FSB(dp->i_mount, - INT_GET(name_rmt->valuelen, - ARCH_CONVERT))); + lp->valueblk = be32_to_cpu(name_rmt->valueblk); + lp->valuelen = XFS_B_TO_FSB(dp->i_mount, + be32_to_cpu(name_rmt->valuelen)); lp++; } } @@ -3050,10 +2953,8 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) error = 0; for (lp = list, i = 0; i < count; i++, lp++) { tmp = xfs_attr_leaf_freextent(trans, dp, - INT_GET(lp->valueblk, - ARCH_CONVERT), - INT_GET(lp->valuelen, - ARCH_CONVERT)); + lp->valueblk, lp->valuelen); + if (error == 0) error = tmp; /* save only the 1st errno */ } diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h index 541e34109bb9..51c3ee156b2f 100644 --- a/fs/xfs/xfs_attr_leaf.h +++ b/fs/xfs/xfs_attr_leaf.h @@ -73,39 +73,39 @@ struct xfs_trans; #define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */ - __uint16_t base; /* base of free region */ - __uint16_t size; /* length of free region */ + __be16 base; /* base of free region */ + __be16 size; /* length of free region */ } xfs_attr_leaf_map_t; typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __uint16_t count; /* count of active leaf_entry's */ - __uint16_t usedbytes; /* num bytes of names/values stored */ - __uint16_t firstused; /* first used byte in name area */ - __uint8_t holes; /* != 0 if blk needs compaction */ - __uint8_t pad1; + __be16 count; /* count of active leaf_entry's */ + __be16 usedbytes; /* num bytes of names/values stored */ + __be16 firstused; /* first used byte in name area */ + __u8 holes; /* != 0 if blk needs compaction */ + __u8 pad1; xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */ } xfs_attr_leaf_hdr_t; typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */ - xfs_dahash_t hashval; /* hash value of name */ - __uint16_t nameidx; /* index into buffer of name/value */ - __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ - __uint8_t pad2; /* unused pad byte */ + __be32 hashval; /* hash value of name */ + __be16 nameidx; /* index into buffer of name/value */ + __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ + __u8 pad2; /* unused pad byte */ } xfs_attr_leaf_entry_t; typedef struct xfs_attr_leaf_name_local { - __uint16_t valuelen; /* number of bytes in value */ - __uint8_t namelen; /* length of name bytes */ - __uint8_t nameval[1]; /* name/value bytes */ + __be16 valuelen; /* number of bytes in value */ + __u8 namelen; /* length of name bytes */ + __u8 nameval[1]; /* name/value bytes */ } xfs_attr_leaf_name_local_t; typedef struct xfs_attr_leaf_name_remote { - xfs_dablk_t valueblk; /* block number of value bytes */ - __uint32_t valuelen; /* number of bytes in value */ - __uint8_t namelen; /* length of name bytes */ - __uint8_t name[1]; /* name bytes */ + __be32 valueblk; /* block number of value bytes */ + __be32 valuelen; /* number of bytes in value */ + __u8 namelen; /* length of name bytes */ + __u8 name[1]; /* name bytes */ } xfs_attr_leaf_name_remote_t; typedef struct xfs_attr_leafblock { @@ -143,8 +143,8 @@ typedef struct xfs_attr_leafblock { static inline xfs_attr_leaf_name_remote_t * xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_remote_t *) &((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; + return (xfs_attr_leaf_name_remote_t *) + &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } #define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ @@ -152,16 +152,15 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) static inline xfs_attr_leaf_name_local_t * xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_local_t *) &((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; + return (xfs_attr_leaf_name_local_t *) + &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } #define XFS_ATTR_LEAF_NAME(leafp,idx) \ xfs_attr_leaf_name(leafp,idx) static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) { - return (&((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]); + return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } /* diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h index ffed6ca81a52..f67f917803b1 100644 --- a/fs/xfs/xfs_attr_sf.h +++ b/fs/xfs/xfs_attr_sf.h @@ -32,8 +32,8 @@ struct xfs_inode; */ typedef struct xfs_attr_shortform { struct xfs_attr_sf_hdr { /* constant-structure header block */ - __uint16_t totsize; /* total bytes in shortform list */ - __uint8_t count; /* count of active entries */ + __be16 totsize; /* total bytes in shortform list */ + __u8 count; /* count of 
active entries */ } hdr; struct xfs_attr_sf_entry { __uint8_t namelen; /* actual length of name (no NULL) */ @@ -66,8 +66,8 @@ typedef struct xfs_attr_sf_sort { #define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ ((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) #define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ - (INT_GET(((xfs_attr_shortform_t *) \ - ((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) + (be16_to_cpu(((xfs_attr_shortform_t *) \ + ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) #if defined(XFS_ATTR_TRACE) /* diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 70625e577c70..2d702e4a74a3 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -89,7 +89,7 @@ xfs_bmap_add_attrfork_local( int *flags); /* inode logging flags */ /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -97,7 +97,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -113,7 +113,7 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ @@ -129,7 +129,7 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp,/* inode logging flags */ int rsvd); /* OK to allocate reserved blocks */ @@ -142,7 +142,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int whichfork); /* data or attr fork */ @@ -155,7 +155,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp); /* inode logging flags */ /* @@ -169,7 +169,7 @@ xfs_bmap_alloc( /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. 
- * Since the extent list is already in-core, all we have to do is + * Since the file extents are already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -191,7 +191,7 @@ xfs_bmap_check_extents( #endif /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after removing space (or undoing a delayed allocation). */ STATIC int /* error */ @@ -201,7 +201,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/insert */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp,/* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd); /* OK to allocate reserved blocks */ @@ -217,18 +217,6 @@ xfs_bmap_del_free( xfs_bmap_free_item_t *free); /* list item to be freed */ /* - * Remove count entries from the extents array for inode "ip", starting - * at index "idx". Copies the remaining items down over the deleted ones, - * and gives back the excess memory. - */ -STATIC void -xfs_bmap_delete_exlist( - xfs_inode_t *ip, /* incode inode pointer */ - xfs_extnum_t idx, /* starting delete index */ - xfs_extnum_t count, /* count of items to delete */ - int whichfork); /* data or attr fork */ - -/* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. */ @@ -244,18 +232,6 @@ xfs_bmap_extents_to_btree( int whichfork); /* data or attr fork */ /* - * Insert new item(s) in the extent list for inode "ip". - * Count new items are inserted at offset idx. - */ -STATIC void -xfs_bmap_insert_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting index of new items */ - xfs_extnum_t count, /* number of inserted items */ - xfs_bmbt_irec_t *new, /* items to insert */ - int whichfork); /* data or attr fork */ - -/* * Convert a local file to an extents file. * This code is sort of bogus, since the file data needs to get * logged so it won't be lost. The bmap-level manipulations are ok, though. @@ -316,7 +292,7 @@ xfs_bmap_trace_addentry( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + * Add bmap trace entry prior to a call to xfs_iext_remove. */ STATIC void xfs_bmap_trace_delete( @@ -328,7 +304,7 @@ xfs_bmap_trace_delete( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * Add bmap trace entry prior to a call to xfs_iext_insert, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -343,7 +319,7 @@ xfs_bmap_trace_insert( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry after updating an extent list entry in place. + * Add bmap trace entry after updating an extent record in place. */ STATIC void xfs_bmap_trace_post_update( @@ -354,7 +330,7 @@ xfs_bmap_trace_post_update( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to updating an extent list entry in place. + * Add bmap trace entry prior to updating an extent record in place. 
*/ STATIC void xfs_bmap_trace_pre_update( @@ -413,19 +389,24 @@ STATIC int xfs_bmap_count_tree( xfs_mount_t *mp, xfs_trans_t *tp, + xfs_ifork_t *ifp, xfs_fsblock_t blockno, int levelin, int *count); STATIC int xfs_bmap_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_extnum_t idx, int numrecs, int *count); STATIC int xfs_bmap_disk_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_mount_t *mp, + xfs_extnum_t idx, + xfs_bmbt_block_t *block, int numrecs, int *count); @@ -537,7 +518,7 @@ xfs_bmap_add_attrfork_local( } /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -545,7 +526,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -578,7 +559,7 @@ xfs_bmap_add_extent( if (nextents == 0) { xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); + xfs_iext_insert(ifp, 0, 1, new); ASSERT(cur == NULL); ifp->if_lastex = 0; if (!ISNULLSTARTBLOCK(new->br_startblock)) { @@ -614,7 +595,7 @@ xfs_bmap_add_extent( /* * Get the record referred to by idx. */ - xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev); /* * If it's a real allocation record, and the new allocation ends * after the start of the referred to record, then we're filling @@ -714,14 +695,13 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to use reserved data block allocation */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ int diff; /* temp value */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ @@ -730,6 +710,7 @@ xfs_bmap_add_extent_delay_real( static char fname[] = "xfs_bmap_add_extent_delay_real"; #endif int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ @@ -763,8 +744,8 @@ xfs_bmap_add_extent_delay_real( * Set up a bunch of variables to make the tests simpler. */ cur = *curp; - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &PREV); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); @@ -781,7 +762,7 @@ xfs_bmap_add_extent_delay_real( * Don't set contiguous if the combined extent would be too large. 
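/*
 * Editor's sketch (assumption: the STATE_SET(LEFT_CONTIG, ...) body,
 * which the hunk context omits, follows the usual xfs_bmap rule):
 * "contiguous with the left neighbor" means adjacent in both file
 * offset and disk block, with matching extent state, and a merged
 * length that still fits in a single extent record:
 */
	STATE_SET(LEFT_CONTIG,
		STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
		LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
		LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
		LEFT.br_state == new->br_state &&
		LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);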
*/ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &LEFT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -798,7 +779,7 @@ xfs_bmap_add_extent_delay_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(ep + 1, &RIGHT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -825,14 +806,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 2); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents--; if (cur == NULL) @@ -867,14 +848,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -908,7 +889,7 @@ xfs_bmap_add_extent_delay_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx + 1, 1); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -964,7 +945,7 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -1010,7 +991,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1039,8 +1020,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? 
cur->bc_private.b.allocated : 0)); - base = ip->i_df.if_u1.if_extents; - ep = &base[idx + 1]; + ep = xfs_iext_get_ext(ifp, idx + 1); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, XFS_DATA_FORK); @@ -1058,7 +1038,8 @@ xfs_bmap_add_extent_delay_real( xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); xfs_bmbt_set_blockcount(ep, temp); - xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), + new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, @@ -1098,7 +1079,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 1, new); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1127,8 +1108,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? cur->bc_private.b.allocated : 0)); - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); *dnew = temp; @@ -1149,7 +1129,7 @@ xfs_bmap_add_extent_delay_real( r[1].br_blockcount = temp2; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 2, &r[0]); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1204,13 +1184,13 @@ xfs_bmap_add_extent_delay_real( } } } - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); - xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), + NULLSTARTBLOCK((int)temp2)); xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); *dnew = temp + temp2; @@ -1254,10 +1234,9 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp) /* inode logging flags */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ int error; /* error return value */ @@ -1265,6 +1244,7 @@ xfs_bmap_add_extent_unwritten_real( static char fname[] = "xfs_bmap_add_extent_unwritten_real"; #endif int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_exntst_t newext; /* new extent state */ xfs_exntst_t oldext; /* old extent state */ @@ -1298,8 +1278,8 @@ xfs_bmap_add_extent_unwritten_real( */ error = 0; cur = *curp; - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &PREV); newext = new->br_state; oldext = (newext 
== XFS_EXT_UNWRITTEN) ? @@ -1320,7 +1300,7 @@ xfs_bmap_add_extent_unwritten_real( * Don't set contiguous if the combined extent would be too large. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &LEFT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -1337,7 +1317,7 @@ xfs_bmap_add_extent_unwritten_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(ep + 1, &RIGHT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -1363,14 +1343,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 2); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents -= 2; if (cur == NULL) @@ -1409,14 +1389,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1456,7 +1436,7 @@ xfs_bmap_add_extent_unwritten_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx + 1, 1); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1516,7 +1496,7 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -1571,7 +1551,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1609,7 +1589,8 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_blockcount - new->br_blockcount); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, XFS_DATA_FORK); - xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), + new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); @@ -1649,7 +1630,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "RF", ip, 
idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 1, new); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1696,7 +1677,7 @@ xfs_bmap_add_extent_unwritten_real( r[1].br_state = oldext; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 2, &r[0]); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents += 2; if (cur == NULL) @@ -1770,15 +1751,15 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to allocate reserved blocks */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ - xfs_bmbt_rec_t *ep; /* extent list entry for idx */ + xfs_bmbt_rec_t *ep; /* extent record for idx */ #ifdef XFS_BMAP_TRACE static char fname[] = "xfs_bmap_add_extent_hole_delay"; #endif + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_irec_t left; /* left neighbor extent entry */ xfs_filblks_t newlen=0; /* new indirect size */ xfs_filblks_t oldlen=0; /* old indirect size */ @@ -1799,15 +1780,15 @@ xfs_bmap_add_extent_hole_delay( ((state &= ~MASK(b)), 0)) #define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); state = 0; ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); /* * Check and set flags if this segment has a left neighbor */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &left); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -1844,23 +1825,24 @@ xfs_bmap_add_extent_hole_delay( /* * New allocation is contiguous with delayed allocations * on the left and on the right. - * Merge all three into a single extent list entry. + * Merge all three into a single extent record. 
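A pattern worth noting across all of these hunks: every piece of pointer arithmetic over the flat extent array (&base[idx], ep - 1, ep + 1) is rewritten as a call through xfs_iext_get_ext(ifp, idx), so lookups funnel through one accessor and the incore list is free to change its backing representation later without touching callers. A minimal sketch of the idea, with hypothetical names (fork_t, iext_get) standing in for the kernel types:

#include <assert.h>
#include <stddef.h>

typedef struct { unsigned long long l0, l1; } rec_t;    /* packed extent record */

typedef struct {
        rec_t   *recs;          /* backing store; may be reallocated or chunked */
        size_t  nrecs;
} fork_t;

/*
 * Single access point for extent lookups.  Callers index by record
 * number and never cache raw pointers across an insert or remove,
 * which is what makes a later change of representation safe.
 */
static rec_t *
iext_get(fork_t *fp, size_t idx)
{
        assert(idx < fp->nrecs);
        return &fp->recs[idx];
}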
*/ temp = left.br_blockcount + new->br_blockcount + right.br_blockcount; xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, temp); + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock) + STARTBLOCKVAL(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), + NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); ip->i_df.if_lastex = idx - 1; break; @@ -1873,11 +1855,12 @@ xfs_bmap_add_extent_hole_delay( temp = left.br_blockcount + new->br_blockcount; xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, temp); + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), + NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; @@ -1909,7 +1892,7 @@ xfs_bmap_add_extent_hole_delay( oldlen = newlen = 0; xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; break; } @@ -1940,7 +1923,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { @@ -1970,13 +1953,13 @@ xfs_bmap_add_extent_hole_real( ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); - ep = &ifp->if_u1.if_extents[idx]; + ep = xfs_iext_get_ext(ifp, idx); state = 0; /* * Check and set flags if this segment has a left neighbor. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &left); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -2019,18 +2002,18 @@ xfs_bmap_add_extent_hole_real( /* * New allocation is contiguous with real allocations on the * left and on the right. - * Merge all three into a single extent list entry. + * Merge all three into a single extent record. */ xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount + right.br_blockcount); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, whichfork); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, whichfork); - xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + xfs_iext_remove(ifp, idx, 1); ifp->if_lastex = idx - 1; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); @@ -2062,7 +2045,7 @@ xfs_bmap_add_extent_hole_real( * Merge the new allocation with the left neighbor. 
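The hole-filling cases above all reduce to a four-way decision on the LEFT_CONTIG/RIGHT_CONTIG bits: merge into both neighbors, merge left, merge right, or insert a fresh record. A simplified sketch of that switch over a dense array of in-memory records, omitting the tracing, btree and quota work (the caller is assumed to have verified contiguity and, for the insert case, spare capacity):

#include <string.h>
#include <stddef.h>

typedef struct { unsigned long long off, bno, len; } irec_t;

enum { LEFT_CONTIG = 1 << 0, RIGHT_CONTIG = 1 << 1 };

static void
add_hole_extent(irec_t *recs, size_t *n, size_t idx, const irec_t *new,
        int state)
{
        switch (state & (LEFT_CONTIG | RIGHT_CONTIG)) {
        case LEFT_CONTIG | RIGHT_CONTIG:
                /* widen the left record over new and the right record */
                recs[idx - 1].len += new->len + recs[idx].len;
                memmove(&recs[idx], &recs[idx + 1],
                        (*n - idx - 1) * sizeof(*recs));
                (*n)--;
                break;
        case LEFT_CONTIG:
                recs[idx - 1].len += new->len;
                break;
        case RIGHT_CONTIG:
                recs[idx].off = new->off;
                recs[idx].bno = new->bno;
                recs[idx].len += new->len;
                break;
        case 0:
                /* no merge possible: shift the tail and insert */
                memmove(&recs[idx + 1], &recs[idx],
                        (*n - idx) * sizeof(*recs));
                recs[idx] = *new;
                (*n)++;
                break;
        }
}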
*/ xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); ifp->if_lastex = idx - 1; @@ -2116,7 +2099,7 @@ xfs_bmap_add_extent_hole_real( */ xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork); + xfs_iext_insert(ifp, idx, 1, new); ifp->if_lastex = idx; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); @@ -2311,25 +2294,15 @@ xfs_bmap_extsize_align( #define XFS_ALLOC_GAP_UNITS 4 -/* - * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. - * It figures out where to ask the underlying allocator to put the new extent. - */ STATIC int -xfs_bmap_alloc( +xfs_bmap_adjacent( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_fsblock_t adjust; /* adjustment to block numbers */ - xfs_alloctype_t atype=0; /* type for allocation routines */ - int error; /* error return value */ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_mount_t *mp; /* mount point structure */ int nullfb; /* true if ap->firstblock isn't set */ int rt; /* true if inode is realtime */ - xfs_extlen_t prod = 0; /* product factor for allocators */ - xfs_extlen_t ralen = 0; /* realtime allocation length */ - xfs_extlen_t align; /* minimum allocation alignment */ - xfs_rtblock_t rtx; #define ISVALID(x,y) \ (rt ? \ @@ -2338,75 +2311,10 @@ xfs_bmap_alloc( XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) - /* - * Set up variables. - */ mp = ap->ip->i_mount; nullfb = ap->firstblock == NULLFSBLOCK; rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); - if (rt) { - align = ap->ip->i_d.di_extsize ? - ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; - /* Set prod to match the extent size */ - prod = align / mp->m_sb.sb_rextsize; - - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, rt, ap->eof, 0, - ap->conv, &ap->off, &ap->alen); - if (error) - return error; - ASSERT(ap->alen); - ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); - - /* - * If the offset & length are not perfectly aligned - * then kill prod, it will just get us in trouble. - */ - if (do_mod(ap->off, align) || ap->alen % align) - prod = 1; - /* - * Set ralen to be the actual requested length in rtextents. - */ - ralen = ap->alen / mp->m_sb.sb_rextsize; - /* - * If the old value was close enough to MAXEXTLEN that - * we rounded up to it, cut it back so it's valid again. - * Note that if it's a really large request (bigger than - * MAXEXTLEN), we don't hear about that number, and can't - * adjust the starting point to match it. - */ - if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) - ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; - /* - * If it's an allocation to an empty file at offset 0, - * pick an extent that will space things out in the rt area. - */ - if (ap->eof && ap->off == 0) { - error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); - if (error) - return error; - ap->rval = rtx * mp->m_sb.sb_rextsize; - } else - ap->rval = 0; - } else { - align = (ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? 
- ap->ip->i_d.di_extsize : 0; - if (unlikely(align)) { - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, rt, - ap->eof, 0, ap->conv, - &ap->off, &ap->alen); - ASSERT(!error); - ASSERT(ap->alen); - } - if (nullfb) - ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); - else - ap->rval = ap->firstblock; - } - /* * If allocating at eof, and there's a previous real block, * try to use it's last block as our starting point. @@ -2531,287 +2439,384 @@ xfs_bmap_alloc( else if (gotbno != NULLFSBLOCK) ap->rval = gotbno; } +#undef ISVALID + return 0; +} + +STATIC int +xfs_bmap_rtalloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + xfs_alloctype_t atype = 0; /* type for allocation routines */ + int error; /* error return value */ + xfs_mount_t *mp; /* mount point structure */ + xfs_extlen_t prod = 0; /* product factor for allocators */ + xfs_extlen_t ralen = 0; /* realtime allocation length */ + xfs_extlen_t align; /* minimum allocation alignment */ + xfs_rtblock_t rtx; /* realtime extent number */ + xfs_rtblock_t rtb; + + mp = ap->ip->i_mount; + align = ap->ip->i_d.di_extsize ? + ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; + prod = align / mp->m_sb.sb_rextsize; + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, 1, ap->eof, 0, + ap->conv, &ap->off, &ap->alen); + if (error) + return error; + ASSERT(ap->alen); + ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); + + /* + * If the offset & length are not perfectly aligned + * then kill prod, it will just get us in trouble. + */ + if (do_mod(ap->off, align) || ap->alen % align) + prod = 1; + /* + * Set ralen to be the actual requested length in rtextents. + */ + ralen = ap->alen / mp->m_sb.sb_rextsize; + /* + * If the old value was close enough to MAXEXTLEN that + * we rounded up to it, cut it back so it's valid again. + * Note that if it's a really large request (bigger than + * MAXEXTLEN), we don't hear about that number, and can't + * adjust the starting point to match it. + */ + if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) + ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; + /* + * If it's an allocation to an empty file at offset 0, + * pick an extent that will space things out in the rt area. + */ + if (ap->eof && ap->off == 0) { + error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); + if (error) + return error; + ap->rval = rtx * mp->m_sb.sb_rextsize; + } else { + ap->rval = 0; + } + + xfs_bmap_adjacent(ap); + + /* + * Realtime allocation, done through xfs_rtallocate_extent. + */ + atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; + do_div(ap->rval, mp->m_sb.sb_rextsize); + rtb = ap->rval; + ap->alen = ralen; + if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, + &ralen, atype, ap->wasdel, prod, &rtb))) + return error; + if (rtb == NULLFSBLOCK && prod > 1 && + (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, + ap->alen, &ralen, atype, + ap->wasdel, 1, &rtb))) + return error; + ap->rval = rtb; + if (ap->rval != NULLFSBLOCK) { + ap->rval *= mp->m_sb.sb_rextsize; + ralen *= mp->m_sb.sb_rextsize; + ap->alen = ralen; + ap->ip->i_d.di_nblocks += ralen; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= ralen; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? 
XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT, (long) ralen); + } else { + ap->alen = 0; + } + return 0; +} + +STATIC int +xfs_bmap_btalloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + xfs_mount_t *mp; /* mount point structure */ + xfs_alloctype_t atype = 0; /* type for allocation routines */ + xfs_extlen_t align; /* minimum allocation alignment */ + xfs_agnumber_t ag; + xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ + xfs_agnumber_t startag; + xfs_alloc_arg_t args; + xfs_extlen_t blen; + xfs_extlen_t delta; + xfs_extlen_t longest; + xfs_extlen_t need; + xfs_extlen_t nextminlen = 0; + xfs_perag_t *pag; + int nullfb; /* true if ap->firstblock isn't set */ + int isaligned; + int notinit; + int tryagain; + int error; + + mp = ap->ip->i_mount; + align = (ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? + ap->ip->i_d.di_extsize : 0; + if (unlikely(align)) { + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, 0, ap->eof, 0, ap->conv, + &ap->off, &ap->alen); + ASSERT(!error); + ASSERT(ap->alen); + } + nullfb = ap->firstblock == NULLFSBLOCK; + fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); + if (nullfb) + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); + else + ap->rval = ap->firstblock; + + xfs_bmap_adjacent(ap); + /* * If allowed, use ap->rval; otherwise must use firstblock since * it's in the right allocation group. */ - if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) + if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) ; else ap->rval = ap->firstblock; /* - * Realtime allocation, done through xfs_rtallocate_extent. + * Normal allocation, done through xfs_alloc_vextent. */ - if (rt) { -#ifndef __KERNEL__ - ASSERT(0); -#else - xfs_rtblock_t rtb; - - atype = ap->rval == 0 ? - XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; - do_div(ap->rval, mp->m_sb.sb_rextsize); - rtb = ap->rval; - ap->alen = ralen; - if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, - &ralen, atype, ap->wasdel, prod, &rtb))) - return error; - if (rtb == NULLFSBLOCK && prod > 1 && - (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, - ap->alen, &ralen, atype, - ap->wasdel, 1, &rtb))) - return error; - ap->rval = rtb; - if (ap->rval != NULLFSBLOCK) { - ap->rval *= mp->m_sb.sb_rextsize; - ralen *= mp->m_sb.sb_rextsize; - ap->alen = ralen; - ap->ip->i_d.di_nblocks += ralen; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= ralen; + tryagain = isaligned = 0; + args.tp = ap->tp; + args.mp = mp; + args.fsbno = ap->rval; + args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + blen = 0; + if (nullfb) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.total = ap->total; + /* + * Find the longest available space. + * We're going to try for the whole allocation at once. + */ + startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); + notinit = 0; + down_read(&mp->m_peraglock); + while (blen < ap->alen) { + pag = &mp->m_perag[ag]; + if (!pag->pagf_init && + (error = xfs_alloc_pagf_init(mp, args.tp, + ag, XFS_ALLOC_FLAG_TRYLOCK))) { + up_read(&mp->m_peraglock); + return error; + } /* - * Adjust the disk quota also. This was reserved - * earlier. + * See xfs_alloc_fix_freelist... */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : - XFS_TRANS_DQ_RTBCOUNT, - (long) ralen); - } else - ap->alen = 0; -#endif /* __KERNEL__ */ + if (pag->pagf_init) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? 
+ need - pag->pagf_flcount : 0; + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || + pag->pagf_longest > 0); + if (blen < longest) + blen = longest; + } else + notinit = 1; + if (++ag == mp->m_sb.sb_agcount) + ag = 0; + if (ag == startag) + break; + } + up_read(&mp->m_peraglock); + /* + * Since the above loop did a BUF_TRYLOCK, it is + * possible that there is space for this request. + */ + if (notinit || blen < ap->minlen) + args.minlen = ap->minlen; + /* + * If the best seen length is less than the request + * length, use the best as the minimum. + */ + else if (blen < ap->alen) + args.minlen = blen; + /* + * Otherwise we've seen an extent as big as alen, + * use that as the minimum. + */ + else + args.minlen = ap->alen; + } else if (ap->low) { + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = args.minlen = ap->minlen; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.total = ap->total; + args.minlen = ap->minlen; + } + if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { + args.prod = ap->ip->i_d.di_extsize; + if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { + args.prod = 1; + args.mod = 0; + } else { + args.prod = NBPP >> mp->m_sb.sb_blocklog; + if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); } /* - * Normal allocation, done through xfs_alloc_vextent. + * If we are not low on available data blocks, and the + * underlying logical volume manager is a stripe, and + * the file offset is zero then try to allocate data + * blocks on stripe unit boundary. + * NOTE: ap->aeof is only set if the allocation length + * is >= the stripe unit and the allocation offset is + * at the end of file. */ - else { - xfs_agnumber_t ag; - xfs_alloc_arg_t args; - xfs_extlen_t blen; - xfs_extlen_t delta; - int isaligned; - xfs_extlen_t longest; - xfs_extlen_t need; - xfs_extlen_t nextminlen=0; - int notinit; - xfs_perag_t *pag; - xfs_agnumber_t startag; - int tryagain; - - tryagain = isaligned = 0; - args.tp = ap->tp; - args.mp = mp; - args.fsbno = ap->rval; - args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); - blen = 0; - if (nullfb) { - args.type = XFS_ALLOCTYPE_START_BNO; - args.total = ap->total; - /* - * Find the longest available space. - * We're going to try for the whole allocation at once. - */ - startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); - notinit = 0; - down_read(&mp->m_peraglock); - while (blen < ap->alen) { - pag = &mp->m_perag[ag]; - if (!pag->pagf_init && - (error = xfs_alloc_pagf_init(mp, args.tp, - ag, XFS_ALLOC_FLAG_TRYLOCK))) { - up_read(&mp->m_peraglock); - return error; - } - /* - * See xfs_alloc_fix_freelist... - */ - if (pag->pagf_init) { - need = XFS_MIN_FREELIST_PAG(pag, mp); - delta = need > pag->pagf_flcount ? - need - pag->pagf_flcount : 0; - longest = (pag->pagf_longest > delta) ? - (pag->pagf_longest - delta) : - (pag->pagf_flcount > 0 || - pag->pagf_longest > 0); - if (blen < longest) - blen = longest; - } else - notinit = 1; - if (++ag == mp->m_sb.sb_agcount) - ag = 0; - if (ag == startag) - break; - } - up_read(&mp->m_peraglock); + if (!ap->low && ap->aeof) { + if (!ap->off) { + args.alignment = mp->m_dalign; + atype = args.type; + isaligned = 1; /* - * Since the above loop did a BUF_TRYLOCK, it is - * possible that there is space for this request. 
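The free-space sizing loop above is a round-robin scan of every allocation group, starting at the hinted AG and wrapping at sb_agcount, keeping the longest free extent seen so a realistic minlen can be chosen. The loop shape in isolation, with pag_longest() as a stand-in for the per-AG freelist math:

static unsigned long
longest_free(unsigned int startag, unsigned int agcount, unsigned long want,
        unsigned long (*pag_longest)(unsigned int))
{
        unsigned long blen = 0;
        unsigned int ag = startag;

        while (blen < want) {
                unsigned long longest = pag_longest(ag);

                if (blen < longest)
                        blen = longest;
                if (++ag == agcount)            /* wrap to AG 0 */
                        ag = 0;
                if (ag == startag)              /* full circle, stop */
                        break;
        }
        return blen;
}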
+ * Adjust for alignment */ - if (notinit || blen < ap->minlen) - args.minlen = ap->minlen; + if (blen > args.alignment && blen <= ap->alen) + args.minlen = blen - args.alignment; + args.minalignslop = 0; + } else { /* - * If the best seen length is less than the request - * length, use the best as the minimum. + * First try an exact bno allocation. + * If it fails then do a near or start bno + * allocation with alignment turned on. */ - else if (blen < ap->alen) - args.minlen = blen; + atype = args.type; + tryagain = 1; + args.type = XFS_ALLOCTYPE_THIS_BNO; + args.alignment = 1; /* - * Otherwise we've seen an extent as big as alen, - * use that as the minimum. + * Compute the minlen+alignment for the + * next case. Set slop so that the value + * of minlen+alignment+slop doesn't go up + * between the calls. */ + if (blen > mp->m_dalign && blen <= ap->alen) + nextminlen = blen - mp->m_dalign; else - args.minlen = ap->alen; - } else if (ap->low) { - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = args.minlen = ap->minlen; - } else { - args.type = XFS_ALLOCTYPE_NEAR_BNO; - args.total = ap->total; - args.minlen = ap->minlen; - } - if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { - args.prod = ap->ip->i_d.di_extsize; - if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); - } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { - args.prod = 1; - args.mod = 0; - } else { - args.prod = NBPP >> mp->m_sb.sb_blocklog; - if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); + nextminlen = args.minlen; + if (nextminlen + mp->m_dalign > args.minlen + 1) + args.minalignslop = + nextminlen + mp->m_dalign - + args.minlen - 1; + else + args.minalignslop = 0; } + } else { + args.alignment = 1; + args.minalignslop = 0; + } + args.minleft = ap->minleft; + args.wasdel = ap->wasdel; + args.isfl = 0; + args.userdata = ap->userdata; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (tryagain && args.fsbno == NULLFSBLOCK) { /* - * If we are not low on available data blocks, and the - * underlying logical volume manager is a stripe, and - * the file offset is zero then try to allocate data - * blocks on stripe unit boundary. - * NOTE: ap->aeof is only set if the allocation length - * is >= the stripe unit and the allocation offset is - * at the end of file. + * Exact allocation failed. Now try with alignment + * turned on. */ - if (!ap->low && ap->aeof) { - if (!ap->off) { - args.alignment = mp->m_dalign; - atype = args.type; - isaligned = 1; - /* - * Adjust for alignment - */ - if (blen > args.alignment && blen <= ap->alen) - args.minlen = blen - args.alignment; - args.minalignslop = 0; - } else { - /* - * First try an exact bno allocation. - * If it fails then do a near or start bno - * allocation with alignment turned on. - */ - atype = args.type; - tryagain = 1; - args.type = XFS_ALLOCTYPE_THIS_BNO; - args.alignment = 1; - /* - * Compute the minlen+alignment for the - * next case. Set slop so that the value - * of minlen+alignment+slop doesn't go up - * between the calls. 
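The minalignslop arithmetic above exists so that the space reserved for the first, exact-bno attempt also covers the follow-up aligned attempt: minlen + alignment + slop must not grow between the two xfs_alloc_vextent calls, or the retry could fail where the first call had room. The same computation in isolation:

/*
 * blen is the best free length seen, alen the request, dalign the
 * stripe alignment, minlen the current minimum.  Produce the minlen
 * for the aligned retry and the slop to reserve up front.
 */
static void
plan_aligned_retry(unsigned long blen, unsigned long alen,
        unsigned long dalign, unsigned long minlen,
        unsigned long *nextminlen, unsigned long *minalignslop)
{
        if (blen > dalign && blen <= alen)
                *nextminlen = blen - dalign;
        else
                *nextminlen = minlen;
        if (*nextminlen + dalign > minlen + 1)
                *minalignslop = *nextminlen + dalign - minlen - 1;
        else
                *minalignslop = 0;
}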
- */ - if (blen > mp->m_dalign && blen <= ap->alen) - nextminlen = blen - mp->m_dalign; - else - nextminlen = args.minlen; - if (nextminlen + mp->m_dalign > args.minlen + 1) - args.minalignslop = - nextminlen + mp->m_dalign - - args.minlen - 1; - else - args.minalignslop = 0; - } - } else { - args.alignment = 1; - args.minalignslop = 0; - } - args.minleft = ap->minleft; - args.wasdel = ap->wasdel; - args.isfl = 0; - args.userdata = ap->userdata; + args.type = atype; + args.fsbno = ap->rval; + args.alignment = mp->m_dalign; + args.minlen = nextminlen; + args.minalignslop = 0; + isaligned = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (isaligned && args.fsbno == NULLFSBLOCK) { + /* + * allocation failed, so turn off alignment and + * try again. + */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb && + args.minlen > ap->minlen) { + args.minlen = ap->minlen; + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = ap->rval; if ((error = xfs_alloc_vextent(&args))) return error; - if (tryagain && args.fsbno == NULLFSBLOCK) { - /* - * Exact allocation failed. Now try with alignment - * turned on. - */ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = mp->m_dalign; - args.minlen = nextminlen; - args.minalignslop = 0; - isaligned = 1; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (isaligned && args.fsbno == NULLFSBLOCK) { - /* - * allocation failed, so turn off alignment and - * try again. - */ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = 0; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb && - args.minlen > ap->minlen) { - args.minlen = ap->minlen; - args.type = XFS_ALLOCTYPE_START_BNO; - args.fsbno = ap->rval; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb) { - args.fsbno = 0; - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = ap->minlen; - args.minleft = 0; - if ((error = xfs_alloc_vextent(&args))) - return error; - ap->low = 1; - } - if (args.fsbno != NULLFSBLOCK) { - ap->firstblock = ap->rval = args.fsbno; - ASSERT(nullfb || fb_agno == args.agno || - (ap->low && fb_agno < args.agno)); - ap->alen = args.len; - ap->ip->i_d.di_nblocks += args.len; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= args.len; - /* - * Adjust the disk quota also. This was reserved - * earlier. - */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : - XFS_TRANS_DQ_BCOUNT, - (long) args.len); - } else { - ap->rval = NULLFSBLOCK; - ap->alen = 0; - } + } + if (args.fsbno == NULLFSBLOCK && nullfb) { + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = ap->minlen; + args.minleft = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + ap->low = 1; + } + if (args.fsbno != NULLFSBLOCK) { + ap->firstblock = ap->rval = args.fsbno; + ASSERT(nullfb || fb_agno == args.agno || + (ap->low && fb_agno < args.agno)); + ap->alen = args.len; + ap->ip->i_d.di_nblocks += args.len; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= args.len; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? 
XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT, + (long) args.len); + } else { + ap->rval = NULLFSBLOCK; + ap->alen = 0; } return 0; -#undef ISVALID +} + +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ +STATIC int +xfs_bmap_alloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata) + return xfs_bmap_rtalloc(ap); + return xfs_bmap_btalloc(ap); } /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. - * Since the extent list is already in-core, all we have to do is + * Since the file extents are already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -2868,7 +2873,7 @@ xfs_bmap_btree_to_extents( } /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after removing space (or undoing a delayed allocation). */ STATIC int /* error */ @@ -2878,7 +2883,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/delete */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *del, /* data to remove from extent list */ + xfs_bmbt_irec_t *del, /* data to remove from extents */ int *logflagsp, /* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd) /* OK to allocate reserved blocks */ @@ -2903,7 +2908,6 @@ xfs_bmap_del_extent( xfs_filblks_t nblks; /* quota/sb block count */ xfs_bmbt_irec_t new; /* new record to be inserted */ /* REFERENCED */ - xfs_extnum_t nextents; /* number of extents in list */ uint qfield; /* quota field to update */ xfs_filblks_t temp; /* for indirect length calculations */ xfs_filblks_t temp2; /* for indirect length calculations */ @@ -2911,10 +2915,10 @@ xfs_bmap_del_extent( XFS_STATS_INC(xs_del_exlist); mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(idx >= 0 && idx < nextents); + ASSERT((idx >= 0) && (idx < ifp->if_bytes / + (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(del->br_blockcount > 0); - ep = &ifp->if_u1.if_extents[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= del->br_startoff); del_endoff = del->br_startoff + del->br_blockcount; @@ -2990,7 +2994,7 @@ xfs_bmap_del_extent( * Matches the whole extent. Delete the entry. */ xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); - xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + xfs_iext_remove(ifp, idx, 1); ifp->if_lastex = idx; if (delay) break; @@ -3160,7 +3164,7 @@ xfs_bmap_del_extent( xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); + xfs_iext_insert(ifp, idx + 1, 1, &new); ifp->if_lastex = idx + 1; break; } @@ -3213,31 +3217,6 @@ xfs_bmap_del_free( } /* - * Remove count entries from the extents array for inode "ip", starting - * at index "idx". Copies the remaining items down over the deleted ones, - * and gives back the excess memory. 
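The net effect of the split: the old single xfs_bmap_alloc, with its realtime and btree branches interleaved, becomes two self-contained allocators plus a two-line dispatcher, while the shared placement heuristics live in xfs_bmap_adjacent. The dispatch shape, reduced to a sketch with stand-in types:

typedef struct {
        int     realtime;       /* XFS_DIFLAG_REALTIME set on the inode */
        int     userdata;       /* allocating file data, not metadata */
} alloc_args_t;

static int rtalloc(alloc_args_t *ap) { (void)ap; return 0; }  /* rt device path */
static int btalloc(alloc_args_t *ap) { (void)ap; return 0; }  /* AG/btree path */

/*
 * Only realtime *data* goes to the realtime device; metadata for a
 * realtime file still comes from the btree-managed data device.
 */
static int
bmap_alloc(alloc_args_t *ap)
{
        if (ap->realtime && ap->userdata)
                return rtalloc(ap);
        return btalloc(ap);
}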
- */ -STATIC void -xfs_bmap_delete_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting delete index */ - xfs_extnum_t count, /* count of items to delete */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_extnum_t nextents; /* number of extents in list after */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - base = ifp->if_u1.if_extents; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count; - memmove(&base[idx], &base[idx + count], - (nextents - idx) * sizeof(*base)); - xfs_iext_realloc(ip, -count, whichfork); -} - -/* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. */ @@ -3258,13 +3237,13 @@ xfs_bmap_extents_to_btree( xfs_bmbt_rec_t *arp; /* child record pointer */ xfs_bmbt_block_t *block; /* btree root block */ xfs_btree_cur_t *cur; /* bmap btree cursor */ - xfs_bmbt_rec_t *ep; /* extent list pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return value */ - xfs_extnum_t i, cnt; /* extent list index */ + xfs_extnum_t i, cnt; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_key_t *kp; /* root block key pointer */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* extent list size */ + xfs_extnum_t nextents; /* number of file extents */ xfs_bmbt_ptr_t *pp; /* root block address pointer */ ifp = XFS_IFORK_PTR(ip, whichfork); @@ -3343,7 +3322,8 @@ xfs_bmap_extents_to_btree( ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { + for (cnt = i = 0; i < nextents; i++) { + ep = xfs_iext_get_ext(ifp, i); if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); @@ -3373,34 +3353,6 @@ xfs_bmap_extents_to_btree( } /* - * Insert new item(s) in the extent list for inode "ip". - * Count new items are inserted at offset idx. - */ -STATIC void -xfs_bmap_insert_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting index of new items */ - xfs_extnum_t count, /* number of inserted items */ - xfs_bmbt_irec_t *new, /* items to insert */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_t *base; /* extent list base */ - xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_extnum_t nextents; /* extent list size */ - xfs_extnum_t to; /* extent list index */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - xfs_iext_realloc(ip, count, whichfork); - base = ifp->if_u1.if_extents; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - memmove(&base[idx + count], &base[idx], - (nextents - (idx + count)) * sizeof(*base)); - for (to = idx; to < idx + count; to++, new++) - xfs_bmbt_set_all(&base[to], new); -} - -/* * Helper routine to reset inode di_forkoff field when switching * attribute fork from local to extent format - we reset it where * possible to make space available for inline data fork extents. 
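The deleted helper makes explicit what the flat extent list did on every removal: one memmove to close the gap, then a realloc to give memory back. The new xfs_iext_remove hides the same operation behind the fork so it can later be done per-chunk instead of over one monolithic allocation. The removed core, reduced:

#include <string.h>
#include <stddef.h>

typedef struct { unsigned long long l0, l1; } rec_t;

/*
 * Delete "count" records at "idx" from a dense array holding "n"
 * records, closing the gap; what xfs_bmap_delete_exlist did before
 * shrinking the allocation (insertion is the mirror image).
 */
static void
exlist_delete(rec_t *base, size_t n, size_t idx, size_t count)
{
        memmove(&base[idx], &base[idx + count],
                (n - idx - count) * sizeof(*base));
}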
@@ -3457,12 +3409,13 @@ xfs_bmap_local_to_extents( error = 0; if (ifp->if_bytes) { xfs_alloc_arg_t args; /* allocation arguments */ - xfs_buf_t *bp; /* buffer for extent list block */ - xfs_bmbt_rec_t *ep; /* extent list pointer */ + xfs_buf_t *bp; /* buffer for extent block */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ args.tp = tp; args.mp = ip->i_mount; - ASSERT(ifp->if_flags & XFS_IFINLINE); + ASSERT((ifp->if_flags & + (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); /* * Allocate a block. We know we need only one, since the * file currently fits in an inode. @@ -3492,8 +3445,8 @@ xfs_bmap_local_to_extents( xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_bmap_forkoff_reset(args.mp, ip, whichfork); xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); - xfs_iext_realloc(ip, 1, whichfork); - ep = ifp->if_u1.if_extents; + xfs_iext_add(ifp, 0, 1); + ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, 1); @@ -3518,7 +3471,7 @@ xfs_bmbt_rec_t * /* pointer to found extent entry */ xfs_bmap_do_search_extents( xfs_bmbt_rec_t *base, /* base of extent list */ xfs_extnum_t lastx, /* last extent index used */ - xfs_extnum_t nextents, /* extent list size */ + xfs_extnum_t nextents, /* number of file extents */ xfs_fileoff_t bno, /* block number searched for */ int *eofp, /* out: end of file found */ xfs_extnum_t *lastxp, /* out: last extent index */ @@ -3569,9 +3522,9 @@ xfs_bmap_do_search_extents( got.br_blockcount = xfs_bmbt_get_blockcount(ep); *eofp = 0; } else { - /* binary search the extents array */ low = 0; high = nextents - 1; + /* binary search the extents array */ while (low <= high) { XFS_STATS_INC(xs_cmp_exlist); lastx = (low + high) >> 1; @@ -3622,6 +3575,57 @@ xfs_bmap_do_search_extents( } /* + * Search the extent records for the entry containing block bno. + * If bno lies in a hole, point to the next entry. If bno lies + * past eof, *eofp will be set, and *prevp will contain the last + * entry (null if none). Else, *lastxp will be set to the index + * of the found entry; *gotp will contain the entry. + */ +xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_multi_extents( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t lastx; /* last extent index */ + + /* + * Initialize the extent entry structure to catch access to + * uninitialized br_startblock field. + */ + gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; + gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; + gotp->br_state = XFS_EXT_INVALID; +#if XFS_BIG_BLKNOS + gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; +#else + gotp->br_startblock = 0xffffa5a5; +#endif + prevp->br_startoff = NULLFILEOFF; + + ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); + if (lastx > 0) { + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp); + } + if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, gotp); + *eofp = 0; + } else { + if (lastx > 0) { + *gotp = *prevp; + } + *eofp = 1; + ep = NULL; + } + *lastxp = lastx; + return ep; +} + +/* * Search the extents list for the inode, for the extent containing bno. 
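Both search paths resolve a file block to an extent index using the "containing record, or next record if in a hole" convention. The underlying lookup is a lower-bound binary search over records sorted by br_startoff; a simplified sketch:

#include <stddef.h>

typedef struct { unsigned long long off, len; } irec_t;

/*
 * Return the index of the record containing file block bno, or, if
 * bno falls in a hole, the index of the next record; a result of n
 * means bno is at or past EOF.
 */
static size_t
ext_search(const irec_t *recs, size_t n, unsigned long long bno)
{
        size_t low = 0, high = n;

        while (low < high) {
                size_t mid = (low + high) / 2;

                if (recs[mid].off + recs[mid].len <= bno)
                        low = mid + 1;  /* record ends at or before bno */
                else
                        high = mid;
        }
        return low;
}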
* If bno lies in a hole, point to the next entry. If bno lies past eof, * *eofp will be set, and *prevp will contain the last entry (null if none). @@ -3639,20 +3643,14 @@ xfs_bmap_search_extents( xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ { xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_extnum_t lastx; /* last extent index used */ - xfs_extnum_t nextents; /* extent list size */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int rt; /* realtime flag */ XFS_STATS_INC(xs_look_exlist); ifp = XFS_IFORK_PTR(ip, whichfork); - lastx = ifp->if_lastex; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, - lastxp, gotp, prevp); + ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); + rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " @@ -3732,7 +3730,7 @@ xfs_bmap_trace_addentry( } /* - * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + * Add bmap trace entry prior to a call to xfs_iext_remove. */ STATIC void xfs_bmap_trace_delete( @@ -3747,13 +3745,13 @@ xfs_bmap_trace_delete( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, - cnt, &ifp->if_u1.if_extents[idx], - cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, + cnt, xfs_iext_get_ext(ifp, idx), + cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL, whichfork); } /* - * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * Add bmap trace entry prior to a call to xfs_iext_insert, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -3783,7 +3781,7 @@ xfs_bmap_trace_insert( } /* - * Add bmap trace entry after updating an extent list entry in place. + * Add bmap trace entry after updating an extent record in place. */ STATIC void xfs_bmap_trace_post_update( @@ -3797,11 +3795,11 @@ xfs_bmap_trace_post_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, - 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); + 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork); } /* - * Add bmap trace entry prior to updating an extent list entry in place. + * Add bmap trace entry prior to updating an extent record in place. 
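Each in-place record change in this file is bracketed by a pre_update/post_update pair, so the trace buffer captures both the old and the new contents of the slot being modified. The idiom in miniature, with a printf standing in for xfs_bmap_trace_addentry():

#include <stdio.h>

typedef struct { unsigned long long off, len; } irec_t;

static void
trace_rec(const char *when, int idx, const irec_t *r)
{
        printf("%s idx=%d off=%llu len=%llu\n", when, idx, r->off, r->len);
}

static void
update_blockcount(irec_t *recs, int idx, unsigned long long newlen)
{
        trace_rec("pre ", idx, &recs[idx]);     /* old contents */
        recs[idx].len = newlen;
        trace_rec("post", idx, &recs[idx]);     /* new contents */
}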
*/ STATIC void xfs_bmap_trace_pre_update( @@ -3815,7 +3813,7 @@ xfs_bmap_trace_pre_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, - &ifp->if_u1.if_extents[idx], NULL, whichfork); + xfs_iext_get_ext(ifp, idx), NULL, whichfork); } #endif /* XFS_BMAP_TRACE */ @@ -3892,7 +3890,7 @@ xfs_bmap_add_attrfork( int rsvd) /* xact may use reserved blks */ { xfs_fsblock_t firstblock; /* 1st block/ag allocated */ - xfs_bmap_free_t flist; /* freed extent list */ + xfs_bmap_free_t flist; /* freed extent records */ xfs_mount_t *mp; /* mount structure */ xfs_trans_t *tp; /* transaction pointer */ unsigned long s; /* spinlock spl value */ @@ -4146,7 +4144,7 @@ xfs_bmap_finish( xfs_efd_log_item_t *efd; /* extent free data */ xfs_efi_log_item_t *efi; /* extent free intention */ int error; /* error return value */ - xfs_bmap_free_item_t *free; /* free extent list item */ + xfs_bmap_free_item_t *free; /* free extent item */ unsigned int logres; /* new log reservation */ unsigned int logcount; /* new log count */ xfs_mount_t *mp; /* filesystem mount structure */ @@ -4242,9 +4240,9 @@ xfs_bmap_first_unused( xfs_fileoff_t *first_unused, /* unused block */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; /* error return value */ + int idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t lastaddr; /* last block number seen */ xfs_fileoff_t lowest; /* lowest useful block */ @@ -4265,10 +4263,8 @@ xfs_bmap_first_unused( return error; lowest = *first_unused; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - for (lastaddr = 0, max = lowest, ep = base; - ep < &base[nextents]; - ep++) { + for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { + ep = xfs_iext_get_ext(ifp, idx); off = xfs_bmbt_get_startoff(ep); /* * See if the hole before this extent will work. @@ -4287,8 +4283,8 @@ xfs_bmap_first_unused( /* * Returns the file-relative block number of the last block + 1 before * last_block (input value) in the file. - * This is not based on i_size, it is based on the extent list. - * Returns 0 for local files, as they do not have an extent list. + * This is not based on i_size, it is based on the extent records. + * Returns 0 for local files, as they do not have extent records. */ int /* error */ xfs_bmap_last_before( @@ -4335,8 +4331,8 @@ xfs_bmap_last_before( /* * Returns the file-relative block number of the first block past eof in - * the file. This is not based on i_size, it is based on the extent list. - * Returns 0 for local files, as they do not have an extent list. + * the file. This is not based on i_size, it is based on the extent records. + * Returns 0 for local files, as they do not have extent records. 
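xfs_bmap_first_unused above finds the lowest sufficiently large hole by walking the sorted records and testing the gap before each extent. Its scan, minus the fork and local-format handling:

#include <stddef.h>

typedef struct { unsigned long long off, len; } irec_t;

static unsigned long long
first_unused(const irec_t *recs, size_t n, unsigned long long lowest,
        unsigned long long len)
{
        unsigned long long lastaddr = 0;
        unsigned long long max = lowest;
        size_t idx;

        for (idx = 0; idx < n; idx++) {
                unsigned long long off = recs[idx].off;

                /* does the hole before this extent fit? */
                if (off >= lowest + len && off - max >= len)
                        return max;
                lastaddr = off + recs[idx].len;
                max = lastaddr > lowest ? lastaddr : lowest;
        }
        return max;                     /* hole past the last extent */
}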
*/ int /* error */ xfs_bmap_last_offset( @@ -4345,7 +4341,6 @@ xfs_bmap_last_offset( xfs_fileoff_t *last_block, /* last block */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to last extent */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ @@ -4368,9 +4363,7 @@ xfs_bmap_last_offset( *last_block = 0; return 0; } - base = &ifp->if_u1.if_extents[0]; - ASSERT(base != NULL); - ep = &base[nextents - 1]; + ep = xfs_iext_get_ext(ifp, nextents - 1); *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep); return 0; } @@ -4400,7 +4393,7 @@ xfs_bmap_one_block( return 0; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - ep = ifp->if_u1.if_extents; + ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_get_all(ep, &s); rval = s.br_startoff == 0 && s.br_blockcount == 1; if (rval && whichfork == XFS_DATA_FORK) @@ -4435,7 +4428,6 @@ xfs_bmap_read_extents( xfs_bmbt_ptr_t *pp; /* pointer to block address */ /* REFERENCED */ xfs_extnum_t room; /* number of entries there's room for */ - xfs_bmbt_rec_t *trp; /* target record pointer */ bno = NULLFSBLOCK; mp = ip->i_mount; @@ -4478,16 +4470,16 @@ xfs_bmap_read_extents( /* * Here with bp and block set to the leftmost leaf node in the tree. */ - room = ifp->if_bytes / (uint)sizeof(*trp); - trp = ifp->if_u1.if_extents; + room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); i = 0; /* - * Loop over all leaf nodes. Copy information to the extent list. + * Loop over all leaf nodes. Copy information to the extent records. */ for (;;) { - xfs_bmbt_rec_t *frp, *temp; + xfs_bmbt_rec_t *frp, *trp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; + xfs_extnum_t start; num_recs = be16_to_cpu(block->bb_numrecs); @@ -4511,12 +4503,13 @@ xfs_bmap_read_extents( if (nextbno != NULLFSBLOCK) xfs_btree_reada_bufl(mp, nextbno, 1); /* - * Copy records into the extent list. + * Copy records into the extent records. */ frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - temp = trp; - for (j = 0; j < num_recs; j++, frp++, trp++) { + start = i; + for (j = 0; j < num_recs; j++, i++, frp++) { + trp = xfs_iext_get_ext(ifp, i); trp->l0 = INT_GET(frp->l0, ARCH_CONVERT); trp->l1 = INT_GET(frp->l1, ARCH_CONVERT); } @@ -4526,14 +4519,14 @@ xfs_bmap_read_extents( * any "older" data bmap btree records for a * set bit in the "extent flag" position. */ - if (unlikely(xfs_check_nostate_extents(temp, num_recs))) { + if (unlikely(xfs_check_nostate_extents(ifp, + start, num_recs))) { XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); goto error0; } } - i += num_recs; xfs_trans_brelse(tp, bp); bno = nextbno; /* @@ -4546,7 +4539,7 @@ xfs_bmap_read_extents( return error; block = XFS_BUF_TO_BMBT_BLOCK(bp); } - ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp)); + ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); xfs_bmap_trace_exlist(fname, ip, i, whichfork); return 0; @@ -4557,7 +4550,7 @@ error0: #ifdef XFS_BMAP_TRACE /* - * Add bmap trace insert entries for all the contents of the extent list. + * Add bmap trace insert entries for all the contents of the extent records. 
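The read_extents loop above fills the incore records by starting at the leftmost leaf of the bmap btree and following right-sibling pointers until they run out. The traversal shape, with a caller-supplied read_block() standing in for the buffer-cache read:

#include <stddef.h>

#define NULLBLOCK ((unsigned long long)-1)

typedef struct { unsigned long long l0, l1; } rec_t;

typedef struct {
        unsigned long long rightsib;    /* next leaf, or NULLBLOCK */
        int                numrecs;
        rec_t              recs[64];    /* arbitrary leaf capacity */
} leaf_t;

static size_t
read_extents(unsigned long long bno,
        const leaf_t *(*read_block)(unsigned long long),
        rec_t *out, size_t room)
{
        size_t i = 0;

        while (bno != NULLBLOCK) {
                const leaf_t *block = read_block(bno);
                int j;

                if (i + (size_t)block->numrecs > room)
                        break;          /* more records than the fork expects */
                for (j = 0; j < block->numrecs; j++)
                        out[i++] = block->recs[j];
                bno = block->rightsib;  /* follow the sibling chain */
        }
        return i;
}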
*/ void xfs_bmap_trace_exlist( @@ -4566,16 +4559,15 @@ xfs_bmap_trace_exlist( xfs_extnum_t cnt, /* count of entries in the list */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_bmbt_rec_t *ep; /* current entry in extent list */ - xfs_extnum_t idx; /* extent list entry number */ + xfs_bmbt_rec_t *ep; /* current extent record */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_irec_t s; /* extent list record */ + xfs_bmbt_irec_t s; /* file extent record */ ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base)); - base = ifp->if_u1.if_extents; - for (idx = 0, ep = base; idx < cnt; idx++, ep++) { + ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); + for (idx = 0; idx < cnt; idx++) { + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &s); xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, whichfork); @@ -4661,14 +4653,10 @@ xfs_bmapi( xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_fileoff_t end; /* end of mapped file region */ - int eof; /* we've hit the end of extent list */ - char contig; /* allocation must be one extent */ - char delay; /* this request is for delayed alloc */ - char exact; /* don't do all of wasdelayed extent */ - char convert; /* unwritten extent I/O completion */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int eof; /* we've hit the end of extents */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current extent list record */ + xfs_bmbt_irec_t got; /* current file extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extlen_t indlen; /* indirect blocks length */ xfs_extnum_t lastx; /* last useful extent number */ @@ -4680,17 +4668,13 @@ xfs_bmapi( int nallocs; /* number of extents alloc\'d */ xfs_extnum_t nextents; /* number of extents in file */ xfs_fileoff_t obno; /* old block number (offset) */ - xfs_bmbt_irec_t prev; /* previous extent list record */ + xfs_bmbt_irec_t prev; /* previous file extent record */ int tmp_logflags; /* temp flags holder */ int whichfork; /* data or attr fork */ char inhole; /* current location is hole in file */ - char stateless; /* ignore state flag set */ - char trim; /* output trimmed to match range */ - char userdata; /* allocating non-metadata */ char wasdelay; /* old extent was delayed */ char wr; /* this is a write request */ char rt; /* this is a realtime file */ - char rsvd; /* OK to allocate reserved blocks */ #ifdef DEBUG xfs_fileoff_t orig_bno; /* original block number value */ int orig_flags; /* original flags arg value */ @@ -4727,15 +4711,8 @@ xfs_bmapi( XFS_STATS_INC(xs_blk_mapw); else XFS_STATS_INC(xs_blk_mapr); - delay = (flags & XFS_BMAPI_DELAY) != 0; - trim = (flags & XFS_BMAPI_ENTIRE) == 0; - userdata = (flags & XFS_BMAPI_METADATA) == 0; - convert = (flags & XFS_BMAPI_CONVERT) != 0; - exact = (flags & XFS_BMAPI_EXACT) != 0; - rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; - contig = (flags & XFS_BMAPI_CONTIG) != 0; /* - * stateless is used to combine extents which + * IGSTATE flag is used to combine extents which * differ only due to the state of the extents. * This technique is used from xfs_getbmap() * when the caller does not wish to see the @@ -4751,10 +4728,9 @@ xfs_bmapi( * xfs_strat_comp(), where the xfs_bmapi() call * is transactioned, and the extents combined. 
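The xfs_bmapi cleanup drops the block of one-bit locals (delay, trim, userdata, convert, ...) that each cached a single flag, and tests the flags word directly at the point of use instead: one copy of the truth, at the cost of slightly longer expressions. In miniature:

enum {
        BMAPI_WRITE     = 1 << 0,
        BMAPI_DELAY     = 1 << 1,
        BMAPI_ENTIRE    = 1 << 2,
};

/*
 * Before:  char delay = (flags & BMAPI_DELAY) != 0;
 *          ... if (wr && !delay) ...
 * After:   test the flags word where it is needed.
 */
static int
needs_real_alloc(int flags)
{
        return (flags & BMAPI_WRITE) && !(flags & BMAPI_DELAY);
}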
*/ - stateless = (flags & XFS_BMAPI_IGSTATE) != 0; - if (stateless && wr) /* if writing unwritten space, no */ - wr = 0; /* allocations are allowed */ - ASSERT(wr || !delay); + if ((flags & XFS_BMAPI_IGSTATE) && wr) /* if writing unwritten space */ + wr = 0; /* no allocations are allowed */ + ASSERT(wr || !(flags & XFS_BMAPI_DELAY)); logflags = 0; nallocs = 0; cur = NULL; @@ -4789,7 +4765,7 @@ xfs_bmapi( if (eof && !wr) got.br_startoff = end; inhole = eof || got.br_startoff > bno; - wasdelay = wr && !inhole && !delay && + wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && ISNULLSTARTBLOCK(got.br_startblock); /* * First, deal with the hole before the allocated space @@ -4801,11 +4777,11 @@ xfs_bmapi( * allocate the stuff asked for in this bmap call * but that wouldn't be as good. */ - if (wasdelay && !exact) { + if (wasdelay && !(flags & XFS_BMAPI_EXACT)) { alen = (xfs_extlen_t)got.br_blockcount; aoff = got.br_startoff; if (lastx != NULLEXTNUM && lastx) { - ep = &ifp->if_u1.if_extents[lastx - 1]; + ep = xfs_iext_get_ext(ifp, lastx - 1); xfs_bmbt_get_all(ep, &prev); } } else if (wasdelay) { @@ -4823,8 +4799,8 @@ xfs_bmapi( got.br_startoff - bno); aoff = bno; } - minlen = contig ? alen : 1; - if (delay) { + minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1; + if (flags & XFS_BMAPI_DELAY) { xfs_extlen_t extsz; /* Figure out the extent size, adjust alen */ @@ -4837,7 +4813,9 @@ xfs_bmapi( if (extsz) { error = xfs_bmap_extsize_align(mp, &got, &prev, extsz, - rt, eof, delay, convert, + rt, eof, + flags&XFS_BMAPI_DELAY, + flags&XFS_BMAPI_CONVERT, &aoff, &alen); ASSERT(!error); } @@ -4875,24 +4853,29 @@ xfs_bmapi( if (rt) { error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - -(extsz), rsvd); + -(extsz), (flags & + XFS_BMAPI_RSVBLOCKS)); } else { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(alen), rsvd); + -(alen), (flags & + XFS_BMAPI_RSVBLOCKS)); } if (!error) { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(indlen), rsvd); + -(indlen), (flags & + XFS_BMAPI_RSVBLOCKS)); if (error && rt) xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - extsz, rsvd); + extsz, (flags & + XFS_BMAPI_RSVBLOCKS)); else if (error) xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - alen, rsvd); + alen, (flags & + XFS_BMAPI_RSVBLOCKS)); } if (error) { @@ -4925,7 +4908,7 @@ xfs_bmapi( /* Indicate if this is the first user data * in the file, or just any user data. */ - if (userdata) { + if (!(flags & XFS_BMAPI_METADATA)) { bma.userdata = (aoff == 0) ? XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; @@ -4937,7 +4920,7 @@ xfs_bmapi( bma.firstblock = *firstblock; bma.alen = alen; bma.off = aoff; - bma.conv = convert; + bma.conv = (flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; bma.minlen = minlen; bma.low = flist->xbf_low; @@ -4948,7 +4931,8 @@ xfs_bmapi( * is larger than a stripe unit. 
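The delayed-allocation path reserves two things against the in-core superblock: the data blocks (or realtime extents) and the worst-case indirect blocks, and the error handling above rolls the first reservation back if the second fails. The reserve-then-undo shape, with a hypothetical two-counter API in place of xfs_mod_incore_sb():

static int
reserve_delalloc(long *free_data, long *free_ind, long alen, long indlen)
{
        if (*free_data < alen)
                return -1;              /* ENOSPC */
        *free_data -= alen;

        if (*free_ind < indlen) {
                *free_data += alen;     /* undo the first reservation */
                return -1;
        }
        *free_ind -= indlen;
        return 0;
}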
*/ if (mp->m_dalign && alen >= mp->m_dalign && - userdata && whichfork == XFS_DATA_FORK) { + (!(flags & XFS_BMAPI_METADATA)) && + (whichfork == XFS_DATA_FORK)) { if ((error = xfs_bmap_isaeof(ip, aoff, whichfork, &bma.aeof))) goto error0; @@ -5011,19 +4995,19 @@ xfs_bmapi( } error = xfs_bmap_add_extent(ip, lastx, &cur, &got, firstblock, flist, &tmp_logflags, whichfork, - rsvd); + (flags & XFS_BMAPI_RSVBLOCKS)); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= aoff); ASSERT(got.br_startoff + got.br_blockcount >= aoff + alen); #ifdef DEBUG - if (delay) { + if (flags & XFS_BMAPI_DELAY) { ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); } @@ -5052,14 +5036,15 @@ xfs_bmapi( * Then deal with the allocated space we found. */ ASSERT(ep != NULL); - if (trim && (got.br_startoff + got.br_blockcount > obno)) { + if (!(flags & XFS_BMAPI_ENTIRE) && + (got.br_startoff + got.br_blockcount > obno)) { if (obno > bno) bno = obno; ASSERT((bno >= obno) || (n == 0)); ASSERT(bno < end); mval->br_startoff = bno; if (ISNULLSTARTBLOCK(got.br_startblock)) { - ASSERT(!wr || delay); + ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } else mval->br_startblock = @@ -5081,7 +5066,7 @@ xfs_bmapi( } else { *mval = got; if (ISNULLSTARTBLOCK(mval->br_startblock)) { - ASSERT(!wr || delay); + ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } } @@ -5107,12 +5092,12 @@ xfs_bmapi( mval->br_state = XFS_EXT_NORM; error = xfs_bmap_add_extent(ip, lastx, &cur, mval, firstblock, flist, &tmp_logflags, whichfork, - rsvd); + (flags & XFS_BMAPI_RSVBLOCKS)); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); /* @@ -5124,9 +5109,10 @@ xfs_bmapi( continue; } - ASSERT(!trim || + ASSERT((flags & XFS_BMAPI_ENTIRE) || ((mval->br_startoff + mval->br_blockcount) <= end)); - ASSERT(!trim || (mval->br_blockcount <= len) || + ASSERT((flags & XFS_BMAPI_ENTIRE) || + (mval->br_blockcount <= len) || (mval->br_startoff < obno)); bno = mval->br_startoff + mval->br_blockcount; len = end - bno; @@ -5141,7 +5127,8 @@ xfs_bmapi( mval[-1].br_startblock != HOLESTARTBLOCK && mval->br_startblock == mval[-1].br_startblock + mval[-1].br_blockcount && - (stateless || mval[-1].br_state == mval->br_state)) { + ((flags & XFS_BMAPI_IGSTATE) || + mval[-1].br_state == mval->br_state)) { ASSERT(mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount); mval[-1].br_blockcount += mval->br_blockcount; @@ -5168,8 +5155,7 @@ xfs_bmapi( /* * Else go on to the next record. */ - ep++; - lastx++; + ep = xfs_iext_get_ext(ifp, ++lastx); if (lastx >= nextents) { eof = 1; prev = got; @@ -5199,7 +5185,7 @@ xfs_bmapi( error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent list if we've converted to btree format. + * logging the extent records if we've converted to btree format. */ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5252,12 +5238,12 @@ xfs_bmapi_single( xfs_fsblock_t *fsb, /* output: mapped block */ xfs_fileoff_t bno) /* starting file offs. 
mapped */ { - int eof; /* we've hit the end of extent list */ + int eof; /* we've hit the end of extents */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current extent list record */ + xfs_bmbt_irec_t got; /* current file extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t lastx; /* last useful extent number */ - xfs_bmbt_irec_t prev; /* previous extent list record */ + xfs_bmbt_irec_t prev; /* previous file extent record */ ifp = XFS_IFORK_PTR(ip, whichfork); if (unlikely( @@ -5312,18 +5298,18 @@ xfs_bunmapi( xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmbt_irec_t del; /* extent being deleted */ int eof; /* is deleting at eof */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return value */ xfs_extnum_t extno; /* extent number in list */ - xfs_bmbt_irec_t got; /* current extent list entry */ + xfs_bmbt_irec_t got; /* current extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ int isrt; /* freeing in rt area */ xfs_extnum_t lastx; /* last extent index used */ int logflags; /* transaction logging flags */ xfs_extlen_t mod; /* rt extent offset */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* size of extent list */ - xfs_bmbt_irec_t prev; /* previous extent list entry */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_bmbt_irec_t prev; /* previous extent record */ xfs_fileoff_t start; /* first file offset deleted */ int tmp_logflags; /* partial logging flags */ int wasdel; /* was a delayed alloc extent */ @@ -5369,7 +5355,7 @@ xfs_bunmapi( * file, back up to the last block if so... */ if (eof) { - ep = &ifp->if_u1.if_extents[--lastx]; + ep = xfs_iext_get_ext(ifp, --lastx); xfs_bmbt_get_all(ep, &got); bno = got.br_startoff + got.br_blockcount - 1; } @@ -5393,7 +5379,7 @@ xfs_bunmapi( if (got.br_startoff > bno) { if (--lastx < 0) break; - ep--; + ep = xfs_iext_get_ext(ifp, lastx); xfs_bmbt_get_all(ep, &got); } /* @@ -5440,7 +5426,8 @@ xfs_bunmapi( del.br_blockcount : mod; if (bno < got.br_startoff) { if (--lastx >= 0) - xfs_bmbt_get_all(--ep, &got); + xfs_bmbt_get_all(xfs_iext_get_ext( + ifp, lastx), &got); } continue; } @@ -5500,7 +5487,8 @@ xfs_bunmapi( * try again. */ ASSERT(lastx > 0); - xfs_bmbt_get_all(ep - 1, &prev); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, + lastx - 1), &prev); ASSERT(prev.br_state == XFS_EXT_NORM); ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); ASSERT(del.br_startblock == @@ -5587,12 +5575,12 @@ nodelete: * If not done go on to the next (previous) record. * Reset ep in case the extents array was re-alloced. */ - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); if (bno != (xfs_fileoff_t)-1 && bno >= start) { if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || xfs_bmbt_get_startoff(ep) > bno) { - lastx--; - ep--; + if (--lastx >= 0) + ep = xfs_iext_get_ext(ifp, lastx); } if (lastx >= 0) xfs_bmbt_get_all(ep, &got); @@ -5636,7 +5624,7 @@ nodelete: error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent list if we've converted to btree format. + * logging the extent records if we've converted to btree format. 
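The xfs_bmapi() and xfs_bunmapi() hunks above drop the cached booleans (wr/delay/exact/contig/rsvd/userdata/trim/stateless) in favour of testing the XFS_BMAPI_* bits directly wherever a decision is made, so a decision can never go stale against the flag word. A minimal user-space sketch of that pattern (flag values hypothetical, not the real XFS masks):

#include <assert.h>
#include <stdio.h>

/* Illustrative flag bits in the spirit of XFS_BMAPI_*; values hypothetical. */
#define BMAPI_WRITE   0x001
#define BMAPI_DELAY   0x002
#define BMAPI_IGSTATE 0x004

static void map_blocks(int flags)
{
	int wr = (flags & BMAPI_WRITE) != 0;

	/* if writing unwritten space, no allocations are allowed */
	if ((flags & BMAPI_IGSTATE) && wr)
		wr = 0;
	assert(wr || !(flags & BMAPI_DELAY));

	/* every later decision re-tests the flag word instead of a
	 * boolean snapshot, so the two can never disagree */
	printf("write=%d delay=%d\n", wr, !!(flags & BMAPI_DELAY));
}

int main(void)
{
	map_blocks(BMAPI_WRITE | BMAPI_DELAY);
	map_blocks(BMAPI_WRITE | BMAPI_IGSTATE);
	return 0;
}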
*/ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5892,9 +5880,9 @@ xfs_bmap_isaeof( { int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ - xfs_extnum_t nextents; /* size of extent list */ - xfs_bmbt_irec_t s; /* expanded extent list entry */ + xfs_bmbt_rec_t *lastrec; /* extent record pointer */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_bmbt_irec_t s; /* expanded extent record */ ASSERT(whichfork == XFS_DATA_FORK); ifp = XFS_IFORK_PTR(ip, whichfork); @@ -5909,7 +5897,7 @@ xfs_bmap_isaeof( /* * Go to the last extent */ - lastrec = &ifp->if_u1.if_extents[nextents - 1]; + lastrec = xfs_iext_get_ext(ifp, nextents - 1); xfs_bmbt_get_all(lastrec, &s); /* * Check we are allocating in the last extent (for delayed allocations) @@ -5936,8 +5924,8 @@ xfs_bmap_eof( xfs_fsblock_t blockcount; /* extent block count */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ - xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_rec_t *lastrec; /* extent record pointer */ + xfs_extnum_t nextents; /* number of file extents */ xfs_fileoff_t startoff; /* extent starting file offset */ ASSERT(whichfork == XFS_DATA_FORK); @@ -5953,7 +5941,7 @@ xfs_bmap_eof( /* * Go to the last extent */ - lastrec = &ifp->if_u1.if_extents[nextents - 1]; + lastrec = xfs_iext_get_ext(ifp, nextents - 1); startoff = xfs_bmbt_get_startoff(lastrec); blockcount = xfs_bmbt_get_blockcount(lastrec); *eof = endoff >= startoff + blockcount; @@ -5969,18 +5957,21 @@ xfs_bmap_check_extents( xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extents list */ xfs_bmbt_rec_t *ep; /* current extent entry */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extents in list */ + xfs_bmbt_rec_t *nextp; /* next extent entry */ ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - base = ifp->if_u1.if_extents; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (ep = base; ep < &base[nextents - 1]; ep++) { + ep = xfs_iext_get_ext(ifp, 0); + for (idx = 0; idx < nextents - 1; idx++) { + nextp = xfs_iext_get_ext(ifp, idx + 1); xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(ep + 1)); + (void *)(nextp)); + ep = nextp; } } @@ -6119,12 +6110,14 @@ xfs_bmap_check_leaf_extents( xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ - xfs_extnum_t i=0; /* index into the extents list */ + xfs_extnum_t i=0, j; /* index into the extents list */ xfs_ifork_t *ifp; /* fork structure */ int level; /* btree level, for checking */ xfs_mount_t *mp; /* file system mount structure */ xfs_bmbt_ptr_t *pp; /* pointer to block address */ - xfs_bmbt_rec_t *ep, *lastp; /* extent pointers in block entry */ + xfs_bmbt_rec_t *ep; /* pointer to current extent */ + xfs_bmbt_rec_t *lastp; /* pointer to previous extent */ + xfs_bmbt_rec_t *nextp; /* pointer to next extent */ int bp_release = 0; if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { @@ -6194,7 +6187,6 @@ xfs_bmap_check_leaf_extents( */ lastp = NULL; for (;;) { - xfs_bmbt_rec_t *frp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; @@ -6213,18 +6205,20 @@ xfs_bmap_check_leaf_extents( * conform with the first entry in 
this one. */ - frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - - for (ep = frp;ep < frp + (num_recs - 1); ep++) { + for (j = 1; j < num_recs; j++) { + nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + block, j + 1, mp->m_bmap_dmxr[0]); if (lastp) { xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)lastp, (void *)ep); } xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(ep + 1)); + (void *)(nextp)); + lastp = ep; + ep = nextp; } - lastp = frp + num_recs - 1; /* For the next iteration */ i += num_recs; if (bp_release) { @@ -6288,7 +6282,7 @@ xfs_bmap_count_blocks( mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { - if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents, + if (unlikely(xfs_bmap_count_leaves(ifp, 0, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", @@ -6310,7 +6304,7 @@ xfs_bmap_count_blocks( ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); bno = INT_GET(*pp, ARCH_CONVERT); - if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) { + if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); @@ -6327,6 +6321,7 @@ int /* error */ xfs_bmap_count_tree( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ + xfs_ifork_t *ifp, /* inode fork pointer */ xfs_fsblock_t blockno, /* file system block number */ int levelin, /* level in btree */ int *count) /* Count of blocks */ @@ -6339,7 +6334,6 @@ xfs_bmap_count_tree( xfs_fsblock_t nextbno; xfs_bmbt_block_t *block, *nextblock; int numrecs; - xfs_bmbt_rec_t *frp; if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; @@ -6364,7 +6358,7 @@ xfs_bmap_count_tree( xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); bno = INT_GET(*pp, ARCH_CONVERT); if (unlikely((error = - xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) { + xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", XFS_ERRLEVEL_LOW, mp); @@ -6376,9 +6370,8 @@ xfs_bmap_count_tree( for (;;) { nextbno = be64_to_cpu(block->bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); - frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, - xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) { + if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp, + 0, block, numrecs, count) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", XFS_ERRLEVEL_LOW, mp); @@ -6399,33 +6392,45 @@ xfs_bmap_count_tree( } /* - * Count leaf blocks given a pointer to an extent list. + * Count leaf blocks given a range of extent records. */ int xfs_bmap_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_extnum_t idx, int numrecs, int *count) { int b; + xfs_bmbt_rec_t *frp; - for ( b = 1; b <= numrecs; b++, frp++) + for (b = 0; b < numrecs; b++) { + frp = xfs_iext_get_ext(ifp, idx + b); *count += xfs_bmbt_get_blockcount(frp); + } return 0; } /* - * Count leaf blocks given a pointer to an extent list originally in btree format. + * Count leaf blocks given a range of extent records originally + * in btree format. 
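The common thread in these hunks is that raw pointer arithmetic on ifp->if_u1.if_extents is replaced by xfs_iext_get_ext(ifp, idx), so callers no longer assume the in-core extent records sit in one contiguous array. A sketch of what such an indexed getter buys, assuming a hypothetical paged layout (page size and structures invented for illustration):

#include <stdio.h>

/* Sketch of an indexed extent getter hiding the storage layout, in the
 * spirit of xfs_iext_get_ext(); the paging scheme here is hypothetical. */
#define RECS_PER_PAGE 4

struct ext_rec { unsigned long long start, count; };

struct ext_fork {
	struct ext_rec *pages[8];	/* up to 8 pages of records */
	int nextents;
};

static struct ext_rec *ext_get(struct ext_fork *ifp, int idx)
{
	return &ifp->pages[idx / RECS_PER_PAGE][idx % RECS_PER_PAGE];
}

/* Counting blocks now walks indices instead of bumping a raw pointer,
 * mirroring the reworked xfs_bmap_count_leaves() loop above. */
static unsigned long long count_blocks(struct ext_fork *ifp)
{
	unsigned long long total = 0;
	int i;

	for (i = 0; i < ifp->nextents; i++)
		total += ext_get(ifp, i)->count;
	return total;
}

int main(void)
{
	struct ext_rec page0[RECS_PER_PAGE] = { {0, 8}, {8, 4}, {16, 2} };
	struct ext_fork fork = { .pages = { page0 }, .nextents = 3 };

	printf("blocks mapped: %llu\n", count_blocks(&fork));
	return 0;
}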
*/ int xfs_bmap_disk_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_mount_t *mp, + xfs_extnum_t idx, + xfs_bmbt_block_t *block, int numrecs, int *count) { int b; + xfs_bmbt_rec_t *frp; - for ( b = 1; b <= numrecs; b++, frp++) + for (b = 1; b <= numrecs; b++) { + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]); *count += xfs_bmbt_disk_get_blockcount(frp); + } return 0; } diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 12cc63dfc2c4..011ccaa9a1c0 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -20,6 +20,7 @@ struct getbmap; struct xfs_bmbt_irec; +struct xfs_ifork; struct xfs_inode; struct xfs_mount; struct xfs_trans; @@ -347,9 +348,28 @@ xfs_bmap_count_blocks( */ int xfs_check_nostate_extents( - xfs_bmbt_rec_t *ep, + struct xfs_ifork *ifp, + xfs_extnum_t idx, xfs_extnum_t num); +/* + * Call xfs_bmap_do_search_extents() to search for the extent + * record containing block bno. If in multi-level in-core extent + * allocation mode, find and extract the target extent buffer, + * otherwise just use the direct extent list. + */ +xfs_bmbt_rec_t * +xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, + xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); + +/* + * Search an extent list for the extent which includes block + * bno. + */ +xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, + xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, + xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); + #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_H__ */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 3f1383d160e8..bea44709afbe 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2754,7 +2754,7 @@ xfs_bmbt_update( } /* - * Check an extent list, which has just been read, for + * Check extent records, which have just been read, for * any bit in the extent flag field. ASSERT on debug * kernels, as this condition should not occur. * Return an error condition (1) if any flags found, @@ -2763,10 +2763,14 @@ xfs_bmbt_update( int xfs_check_nostate_extents( - xfs_bmbt_rec_t *ep, + xfs_ifork_t *ifp, + xfs_extnum_t idx, xfs_extnum_t num) { - for (; num > 0; num--, ep++) { + xfs_bmbt_rec_t *ep; + + for (; num > 0; num--, idx++) { + ep = xfs_iext_get_ext(ifp, idx); if ((ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { ASSERT(0); diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index e095a2d344ae..6478cfa0e539 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -372,14 +372,6 @@ extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *, xfs_exntst_t *, int *); #endif -/* - * Search an extent list for the extent which includes block - * bno. 
- */ -xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, - xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, - xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); - #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h index f57cc9ac875e..022fff62085b 100644 --- a/fs/xfs/xfs_clnt.h +++ b/fs/xfs/xfs_clnt.h @@ -68,8 +68,6 @@ struct xfs_mount_args { * enforcement */ #define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit * enforcement */ -#define XFSMNT_NOATIME 0x00000100 /* don't modify access - * times on reads */ #define XFSMNT_NOALIGN 0x00000200 /* don't allocate at * stripe boundaries*/ #define XFSMNT_RETERR 0x00000400 /* return error to user */ diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 473671fa5c13..4bae3a76c678 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -126,10 +126,10 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, node = bp->data; node->hdr.info.forw = 0; node->hdr.info.back = 0; - INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC); + node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC); node->hdr.info.pad = 0; node->hdr.count = 0; - INT_SET(node->hdr.level, ARCH_CONVERT, level); + node->hdr.level = cpu_to_be16(level); xfs_da_log_buf(tp, bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -290,28 +290,28 @@ xfs_da_split(xfs_da_state_t *state) node = oldblk->bp->data; if (node->hdr.info.forw) { - if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) { + if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno); + node->hdr.info.back = cpu_to_be32(oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); } node = oldblk->bp->data; - if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) { - if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) { + if (node->hdr.info.back) { + if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno); + node->hdr.info.forw = cpu_to_be32(oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); @@ -359,14 +359,14 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(bp != NULL); node = bp->data; oldroot = blk1->bp->data; - if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { - size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] - + if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) { + size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - (char *)oldroot); } else { ASSERT(XFS_DIR_IS_V2(mp)); - ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)oldroot; - size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] - + size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] - (char *)leaf); } memcpy(node, oldroot, size); @@ -381,18 +381,18 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, error = xfs_da_node_create(args, args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp) ? 
mp->m_dirleafblk : 0, - INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork); + be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork); if (error) return(error); node = bp->data; - INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval); - INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno); - INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval); - INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 2); + node->btree[0].hashval = cpu_to_be32(blk1->hashval); + node->btree[0].before = cpu_to_be32(blk1->blkno); + node->btree[1].hashval = cpu_to_be32(blk2->hashval); + node->btree[1].before = cpu_to_be32(blk2->blkno); + node->hdr.count = cpu_to_be16(2); #ifdef DEBUG - if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(blk1->blkno >= mp->m_dirleafblk && blk1->blkno < mp->m_dirfreeblk); ASSERT(blk2->blkno >= mp->m_dirleafblk && @@ -424,7 +424,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int useextra; node = oldblk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); /* * With V2 the extra block is data or freespace. @@ -435,7 +435,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, /* * Do we have to split the node? */ - if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) { + if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) { /* * Allocate a new node, add to the doubly linked chain of * nodes, then move some of our excess entries into it. @@ -472,7 +472,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, * If we had double-split op below us, then add the extra block too. */ node = oldblk->bp->data; - if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (oldblk->index <= be16_to_cpu(node->hdr.count)) { oldblk->index++; xfs_da_node_add(state, oldblk, addblk); if (useextra) { @@ -516,17 +516,17 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure out how many entries need to move, and in which direction. * Swap the nodes around if that makes it simpler. 
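From here on, the xfs_da_btree.c changes are the mechanical conversion of INT_GET/INT_SET(..., ARCH_CONVERT) to be16_to_cpu()/cpu_to_be16()/be32_to_cpu(); together with the __be16/__be32 field types introduced later in this diff, sparse can then flag any access that skips the conversion. User-space stand-ins for the two 16-bit helpers, written byte-wise so they are correct on any host endianness:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal user-space stand-ins for the kernel helpers this diff moves to;
 * the kernel versions also carry sparse annotations so that mixing byte
 * orders becomes a static warning rather than a runtime bug. */
static uint16_t be16_to_cpu(uint16_t v)
{
	const uint8_t *p = (const uint8_t *)&v;
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint16_t cpu_to_be16(uint16_t v)
{
	uint16_t r;
	uint8_t p[2] = { (uint8_t)(v >> 8), (uint8_t)v };

	memcpy(&r, p, 2);
	return r;
}

int main(void)
{
	uint16_t disk = cpu_to_be16(0x1234);	/* as stored in the block */

	printf("on cpu: 0x%04x\n", be16_to_cpu(disk));
	return 0;
}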
*/ - if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && - ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && + ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) || + (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < + be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { tmpnode = node1; node1 = node2; node2 = tmpnode; } - ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2; + ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC); + count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2; if (count == 0) return; tp = state->args->trans; @@ -537,7 +537,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move elements in node2 up to make a hole. */ - if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) { + if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) { tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; btree_d = &node2->btree[count]; @@ -548,13 +548,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move the req'd B-tree elements from high in node1 to * low in node2. */ - INT_MOD(node2->hdr.count, ARCH_CONVERT, count); + be16_add(&node2->hdr.count, count); tmp = count * (uint)sizeof(xfs_da_node_entry_t); - btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count]; + btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; btree_d = &node2->btree[0]; memcpy(btree_d, btree_s, tmp); - INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count)); - + be16_add(&node1->hdr.count, -count); } else { /* * Move the req'd B-tree elements from low in node2 to @@ -563,21 +562,21 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, count = -count; tmp = count * (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; - btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)]; + btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; memcpy(btree_d, btree_s, tmp); - INT_MOD(node1->hdr.count, ARCH_CONVERT, count); + be16_add(&node1->hdr.count, count); xfs_da_log_buf(tp, blk1->bp, XFS_DA_LOGRANGE(node1, btree_d, tmp)); /* * Move elements in node2 down to fill the hole. 
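INT_MOD(x, ARCH_CONVERT, delta) becomes be16_add(&x, delta) in the hunks above. The helper is just the decode/adjust/re-encode idiom applied in place; a sketch, assuming a little-endian host so that a byte swap is the conversion:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }

/* Demo assumption: big-endian storage on a little-endian host, so both
 * conversions are the same swap. */
static uint16_t be16_to_cpu(uint16_t v) { return swab16(v); }
static uint16_t cpu_to_be16(uint16_t v) { return swab16(v); }

/* What be16_add() amounts to: decode, adjust, re-encode in place.  This
 * is the idiom the INT_MOD(..., ARCH_CONVERT, delta) calls become. */
static void be16_add(uint16_t *p, int delta)
{
	*p = cpu_to_be16((uint16_t)(be16_to_cpu(*p) + delta));
}

int main(void)
{
	uint16_t count = cpu_to_be16(7);

	be16_add(&count, 1);	/* node gains an entry */
	be16_add(&count, -2);	/* then loses two */
	printf("count = %u\n", be16_to_cpu(count));
	return 0;
}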
*/ - tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count; + tmp = be16_to_cpu(node2->hdr.count) - count; tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[count]; btree_d = &node2->btree[0]; memmove(btree_d, btree_s, tmp); - INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count)); + be16_add(&node2->hdr.count, -count); } /* @@ -588,7 +587,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, xfs_da_log_buf(tp, blk2->bp, XFS_DA_LOGRANGE(node2, &node2->hdr, sizeof(node2->hdr) + - sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT))); + sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count))); /* * Record the last hashval from each block for upward propagation. @@ -596,15 +595,15 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, */ node1 = blk1->bp->data; node2 = blk2->bp->data; - blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); - blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval); + blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval); /* * Adjust the expected index for insertion. */ - if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) { - blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT); - blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1; /* make it invalid */ + if (blk1->index >= be16_to_cpu(node1->hdr.count)) { + blk2->index = blk1->index - be16_to_cpu(node1->hdr.count); + blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */ } } @@ -622,8 +621,8 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, node = oldblk->bp->data; mp = state->mp; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT))); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); ASSERT(newblk->blkno != 0); if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) ASSERT(newblk->blkno >= mp->m_dirleafblk && @@ -634,22 +633,22 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, */ tmp = 0; btree = &node->btree[ oldblk->index ]; - if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) { - tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree); + if (oldblk->index < be16_to_cpu(node->hdr.count)) { + tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree); memmove(btree + 1, btree, tmp); } - INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval); - INT_SET(btree->before, ARCH_CONVERT, newblk->blkno); + btree->hashval = cpu_to_be32(newblk->hashval); + btree->before = cpu_to_be32(newblk->blkno); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); - INT_MOD(node->hdr.count, ARCH_CONVERT, +1); + be16_add(&node->hdr.count, 1); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); /* * Copy the last hash value from the oldblk to propagate upwards. 
*/ - oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval); } /*======================================================================== @@ -768,21 +767,21 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(args != NULL); ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); oldroot = root_blk->bp->data; - ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC); ASSERT(!oldroot->hdr.info.forw); ASSERT(!oldroot->hdr.info.back); /* * If the root has more than one child, then don't do anything. */ - if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) + if (be16_to_cpu(oldroot->hdr.count) > 1) return(0); /* * Read in the (only) child block, then copy those bytes into * the root block's buffer and free the original child block. */ - child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT); + child = be32_to_cpu(oldroot->btree[0].before); ASSERT(child != 0); error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp, args->whichfork); @@ -790,11 +789,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) return(error); ASSERT(bp != NULL); blkinfo = bp->data; - if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) { - ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + if (be16_to_cpu(oldroot->hdr.level) == 1) { + ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC); } else { - ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC); } ASSERT(!blkinfo->forw); ASSERT(!blkinfo->back); @@ -830,9 +829,9 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC); node = (xfs_da_intnode_t *)info; - count = INT_GET(node->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(node->hdr.count); if (count > (state->node_ents >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); /* blk over 50%, don't try to join */ @@ -849,7 +848,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -871,13 +870,12 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * to shrink a directory over time. 
*/ /* start with smaller blk num */ - forward = (INT_GET(info->forw, ARCH_CONVERT) - < INT_GET(info->back, ARCH_CONVERT)); + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -889,10 +887,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) node = (xfs_da_intnode_t *)info; count = state->node_ents; count -= state->node_ents >> 2; - count -= INT_GET(node->hdr.count, ARCH_CONVERT); + count -= be16_to_cpu(node->hdr.count); node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - count -= INT_GET(node->hdr.count, ARCH_CONVERT); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + count -= be16_to_cpu(node->hdr.count); xfs_da_brelse(state->args->trans, bp); if (count >= 0) break; /* fits with at least 25% to spare */ @@ -973,16 +971,16 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) } for (blk--, level--; level >= 0; blk--, level--) { node = blk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); btree = &node->btree[ blk->index ]; - if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) + if (be32_to_cpu(btree->hashval) == lasthash) break; blk->hashval = lasthash; - INT_SET(btree->hashval, ARCH_CONVERT, lasthash); + btree->hashval = cpu_to_be32(lasthash); xfs_da_log_buf(state->args->trans, blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); } } @@ -997,25 +995,25 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) int tmp; node = drop_blk->bp->data; - ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); + ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count)); ASSERT(drop_blk->index >= 0); /* * Copy over the offending entry, or just zero it out. */ btree = &node->btree[drop_blk->index]; - if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { - tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; + if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) { + tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1; tmp *= (uint)sizeof(xfs_da_node_entry_t); memmove(btree, btree + 1, tmp); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, tmp)); - btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; + btree = &node->btree[be16_to_cpu(node->hdr.count)-1]; } memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - INT_MOD(node->hdr.count, ARCH_CONVERT, -1); + be16_add(&node->hdr.count, -1); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -1023,7 +1021,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) * Copy the last hash value from the block to propagate upwards. 
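The xfs_da_node_toosmall() logic above keeps two thresholds: a block over half full is never coalesced, and a merge with a sibling only proceeds when the combined block would keep at least 25% of its capacity free. Reduced to arithmetic, the check is roughly:

#include <stdio.h>

/* The join heuristic: never coalesce a node over half full, and only
 * merge when the pair still leaves a quarter of the capacity free. */
static int can_join(int capacity, int mine, int theirs)
{
	if (mine > capacity / 2)
		return 0;			/* over 50%, keep it */
	return mine + theirs <= capacity - capacity / 4;
}

int main(void)
{
	printf("%d\n", can_join(64, 20, 25));	/* 45 <= 48 -> 1 */
	printf("%d\n", can_join(64, 20, 30));	/* 50 >  48 -> 0 */
	return 0;
}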
*/ btree--; - drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu(btree->hashval); } /* @@ -1041,40 +1039,40 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, drop_node = drop_blk->bp->data; save_node = save_blk->bp->data; - ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); tp = state->args->trans; /* * If the dying block has lower hashvals, then move all the * elements in the remaining block up to make a hole. */ - if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) + if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) || + (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) < + be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval))) { - btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; - tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)]; + tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); memmove(btree, &save_node->btree[0], tmp); btree = &save_node->btree[0]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * + (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) * sizeof(xfs_da_node_entry_t))); } else { - btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; + btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - INT_GET(drop_node->hdr.count, ARCH_CONVERT) * + be16_to_cpu(drop_node->hdr.count) * sizeof(xfs_da_node_entry_t))); } /* * Move all the B-tree elements from drop_blk to save_blk. */ - tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); memcpy(btree, &drop_node->btree[0], tmp); - INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); + be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, &save_node->hdr, @@ -1083,7 +1081,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Save the last hashval in the remaining block for upward propagation. 
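Several of these hunks end by recomputing blk->hashval from the block's last btree entry; xfs_da_fixhashpath() then pushes that value up the path until an ancestor already agrees, since each interior entry carries the largest hash reachable below it. A toy rendering of that propagation (structures hypothetical, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct node { uint32_t hash[16]; int count; };

static uint32_t lasthash(const struct node *n)
{
	return n->hash[n->count - 1];
}

/* Walk the ancestors of a changed block, refreshing the entry that
 * points at it, and stop as soon as one already matches. */
static void fixhashpath(struct node **path, int *idx, int depth, uint32_t h)
{
	int level;

	for (level = depth - 1; level >= 0; level--) {
		struct node *n = path[level];

		if (n->hash[idx[level]] == h)
			break;		/* ancestors already agree */
		n->hash[idx[level]] = h;
		h = lasthash(n);	/* may ripple further up */
	}
}

int main(void)
{
	struct node root = { { 50, 90 }, 2 };
	struct node *path[1] = { &root };
	int idx[1] = { 1 };

	/* the leaf under root.hash[1] grew its last hashval to 95 */
	fixhashpath(path, idx, 1, 95);
	printf("root last hash = %u\n", lasthash(&root));
	return 0;
}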
*/ - save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval); } /*======================================================================== @@ -1138,46 +1136,46 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) return(error); } curr = blk->bp->data; - ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || - INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC || + be16_to_cpu(curr->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC); /* * Search an intermediate node for a match. */ - blk->magic = INT_GET(curr->magic, ARCH_CONVERT); - if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + blk->magic = be16_to_cpu(curr->magic); + if (blk->magic == XFS_DA_NODE_MAGIC) { node = blk->bp->data; - blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); /* * Binary search. (note: small blocks will skip loop) */ - max = INT_GET(node->hdr.count, ARCH_CONVERT); + max = be16_to_cpu(node->hdr.count); probe = span = max / 2; hashval = args->hashval; for (btree = &node->btree[probe]; span > 4; btree = &node->btree[probe]) { span /= 2; - if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) + if (be32_to_cpu(btree->hashval) < hashval) probe += span; - else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) + else if (be32_to_cpu(btree->hashval) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (probe < max)); - ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); + ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval)); /* * Since we may have duplicate hashval's, find the first * matching hashval in the node. 
*/ - while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { + while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) { btree--; probe--; } - while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { + while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) { btree++; probe++; } @@ -1187,21 +1185,21 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) */ if (probe == max) { blk->index = max-1; - blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[max-1].before); } else { blk->index = probe; - blkno = INT_GET(btree->before, ARCH_CONVERT); + blkno = be32_to_cpu(btree->before); } } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) { blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); break; } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_DIR_LEAF_MAGIC) { blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); break; } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) { blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); break; } @@ -1274,8 +1272,8 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || old_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); + ASSERT(old_blk->magic == be16_to_cpu(old_info->magic)); + ASSERT(new_blk->magic == be16_to_cpu(new_info->magic)); ASSERT(old_blk->magic == new_blk->magic); switch (old_blk->magic) { @@ -1302,47 +1300,44 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, /* * Link new block in before existing block. */ - INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); - new_info->back = old_info->back; /* INT_: direct copy */ - if (INT_GET(old_info->back, ARCH_CONVERT)) { + new_info->forw = cpu_to_be32(old_blk->blkno); + new_info->back = old_info->back; + if (old_info->back) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(old_info->back, - ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(old_info->back), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); - INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); + ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic)); + ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); + tmp_info->forw = cpu_to_be32(new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); + old_info->back = cpu_to_be32(new_blk->blkno); } else { /* * Link new block in after existing block. 
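The lookup path just above binary-searches by hashval and then steps linearly so that duplicate hashvals are always entered at their first occurrence; the two while loops are exactly that adjustment. The same idea, as a compact lower-bound search:

#include <stdint.h>
#include <stdio.h>

/* Return the index of the first element >= key (n if none), which is
 * what the probe/span loop plus the linear backup achieves. */
static int find_first(const uint32_t *v, int n, uint32_t key)
{
	int lo = 0, hi = n;

	while (lo < hi) {
		int mid = (lo + hi) / 2;

		if (v[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int main(void)
{
	uint32_t hashvals[] = { 3, 7, 7, 7, 12, 19 };

	printf("first 7 at index %d\n",
	       find_first(hashvals, 6, 7));	/* prints 1 */
	return 0;
}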
*/ - new_info->forw = old_info->forw; /* INT_: direct copy */ - INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); - if (INT_GET(old_info->forw, ARCH_CONVERT)) { + new_info->forw = old_info->forw; + new_info->back = cpu_to_be32(old_blk->blkno); + if (old_info->forw) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(old_info->forw), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) - == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) - == old_blk->blkno); - INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno); + ASSERT(tmp_info->magic == old_info->magic); + ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); + tmp_info->back = cpu_to_be32(new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); + old_info->forw = cpu_to_be32(new_blk->blkno); } xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); @@ -1360,13 +1355,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) node1 = node1_bp->data; node2 = node2_bp->data; - ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && - (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); - if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && - ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) && + (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC)); + if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && + ((be32_to_cpu(node2->btree[0].hashval) < + be32_to_cpu(node1->btree[0].hashval)) || + (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < + be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { return(1); } return(0); @@ -1381,12 +1376,12 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) xfs_da_intnode_t *node; node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); if (count) - *count = INT_GET(node->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(node->hdr.count); if (!node->hdr.count) return(0); - return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); } /* @@ -1411,50 +1406,47 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || save_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); + ASSERT(save_blk->magic == be16_to_cpu(save_info->magic)); + ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic)); ASSERT(save_blk->magic == drop_blk->magic); - ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || - (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); 
- ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || - (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); + ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || + (be32_to_cpu(save_info->back) == drop_blk->blkno)); + ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || + (be32_to_cpu(drop_info->back) == save_blk->blkno)); /* * Unlink the leaf block from the doubly linked chain of leaves. */ - if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { - save_info->back = drop_info->back; /* INT_: direct copy */ - if (INT_GET(drop_info->back, ARCH_CONVERT)) { + if (be32_to_cpu(save_info->back) == drop_blk->blkno) { + save_info->back = drop_info->back; + if (drop_info->back) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(drop_info->back, - ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(drop_info->back), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); - INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); + ASSERT(tmp_info->magic == save_info->magic); + ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); + tmp_info->forw = cpu_to_be32(save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); } } else { - save_info->forw = drop_info->forw; /* INT_: direct copy */ - if (INT_GET(drop_info->forw, ARCH_CONVERT)) { + save_info->forw = drop_info->forw; + if (drop_info->forw) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(drop_info->forw), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) - == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) - == drop_blk->blkno); - INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno); + ASSERT(tmp_info->magic == save_info->magic); + ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); + tmp_info->back = cpu_to_be32(save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); @@ -1497,14 +1489,14 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, for (blk = &path->blk[level]; level >= 0; blk--, level--) { ASSERT(blk->bp != NULL); node = blk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { blk->index++; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[blk->index].before); break; } else if (!forward && (blk->index > 0)) { blk->index--; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[blk->index].before); break; } } @@ -1536,18 +1528,18 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, return(error); ASSERT(blk->bp != NULL); info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || - INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); - blk->magic = INT_GET(info->magic, ARCH_CONVERT); - if 
(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC || + be16_to_cpu(info->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + blk->magic = be16_to_cpu(info->magic); + if (blk->magic == XFS_DA_NODE_MAGIC) { node = (xfs_da_intnode_t *)info; - blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); if (forward) blk->index = 0; else - blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blk->index = be16_to_cpu(node->hdr.count)-1; + blkno = be32_to_cpu(node->btree[blk->index].before); } else { ASSERT(level == path->active-1); blk->index = 0; @@ -1788,40 +1780,40 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Get values from the moved block. */ - if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + if (be16_to_cpu(dead_info->magic) == XFS_DIR_LEAF_MAGIC) { ASSERT(XFS_DIR_IS_V1(mp)); dead_leaf = (xfs_dir_leafblock_t *)dead_info; dead_level = 0; dead_hash = INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); - } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + } else if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(XFS_DIR_IS_V2(mp)); dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; dead_level = 0; - dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval); } else { - ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC); dead_node = (xfs_da_intnode_t *)dead_info; - dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); - dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + dead_level = be16_to_cpu(dead_node->hdr.level); + dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval); } sib_buf = par_buf = NULL; /* * If the moved block has a left sibling, fix up the pointers. */ - if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { + if ((sib_blkno = be32_to_cpu(dead_info->back))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || - INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { + be32_to_cpu(sib_info->forw) != last_blkno || + sib_info->magic != dead_info->magic)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); + sib_info->forw = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->forw, sizeof(sib_info->forw))); @@ -1831,20 +1823,19 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * If the moved block has a right sibling, fix up the pointers. 
*/ - if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { + if ((sib_blkno = be32_to_cpu(dead_info->forw))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno - || INT_GET(sib_info->magic, ARCH_CONVERT) - != INT_GET(dead_info->magic, ARCH_CONVERT))) { + be32_to_cpu(sib_info->back) != last_blkno || + sib_info->magic != dead_info->magic)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); + sib_info->back = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->back, sizeof(sib_info->back))); @@ -1861,26 +1852,26 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || - (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { + be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC || + (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - level = INT_GET(par_node->hdr.level, ARCH_CONVERT); + level = be16_to_cpu(par_node->hdr.level); for (entno = 0; - entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && - INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; + entno < be16_to_cpu(par_node->hdr.count) && + be32_to_cpu(par_node->btree[entno].hashval) < dead_hash; entno++) continue; - if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { + if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); + par_blkno = be32_to_cpu(par_node->btree[entno].before); if (level == dead_level + 1) break; xfs_da_brelse(tp, par_buf); @@ -1892,13 +1883,13 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, */ for (;;) { for (; - entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && - INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; + entno < be16_to_cpu(par_node->hdr.count) && + be32_to_cpu(par_node->btree[entno].before) != last_blkno; entno++) continue; - if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) + if (entno < be16_to_cpu(par_node->hdr.count)) break; - par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); + par_blkno = be32_to_cpu(par_node->hdr.info.forw); xfs_da_brelse(tp, par_buf); par_buf = NULL; if (unlikely(par_blkno == 0)) { @@ -1911,8 +1902,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || - INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { + be16_to_cpu(par_node->hdr.level) != level || + be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); @@ -1923,7 +1914,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Update the parent entry pointing to the moved block. 
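xfs_da_blk_unlink() and xfs_da_swap_lastblock() above maintain the doubly linked chain of sibling blocks through on-disk forw/back block numbers, reading each neighbour's buffer in order to patch it. With plain in-memory pointers the unlink half reduces to:

#include <stdio.h>

/* Drop one block from the sibling chain, keeping its neighbour; 0/NULL
 * plays the role of "no sibling" here. */
struct blk { int blkno; struct blk *forw, *back; };

static void unlink_blk(struct blk *drop, struct blk *save)
{
	if (save->back == drop) {		/* drop sits before save */
		save->back = drop->back;
		if (drop->back)
			drop->back->forw = save;
	} else {				/* drop sits after save */
		save->forw = drop->forw;
		if (drop->forw)
			drop->forw->back = save;
	}
}

int main(void)
{
	struct blk a = {1}, b = {2}, c = {3};

	a.forw = &b; b.back = &a; b.forw = &c; c.back = &b;
	unlink_blk(&b, &a);			/* drop b, keep a */
	printf("a.forw = %d\n", a.forw ? a.forw->blkno : 0);
	return 0;
}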
*/ - INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); + par_node->btree[entno].before = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, par_buf, XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, sizeof(par_node->btree[entno].before))); @@ -2203,8 +2194,8 @@ xfs_da_do_buf( info = rbp->data; data = rbp->data; free = rbp->data; - magic = INT_GET(info->magic, ARCH_CONVERT); - magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); + magic = be16_to_cpu(info->magic); + magic1 = be32_to_cpu(data->hdr.magic); if (unlikely( XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && (magic != XFS_DIR_LEAF_MAGIC) && @@ -2213,7 +2204,7 @@ xfs_da_do_buf( (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && - (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), + (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { xfs_buftrace("DA READ ERROR", rbp->bps[0]); diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 41352113721a..243a730d5ec8 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -45,10 +45,10 @@ struct zone; (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) typedef struct xfs_da_blkinfo { - xfs_dablk_t forw; /* previous block in list */ - xfs_dablk_t back; /* following block in list */ - __uint16_t magic; /* validity check on block */ - __uint16_t pad; /* unused */ + __be32 forw; /* previous block in list */ + __be32 back; /* following block in list */ + __be16 magic; /* validity check on block */ + __be16 pad; /* unused */ } xfs_da_blkinfo_t; /* @@ -65,12 +65,12 @@ typedef struct xfs_da_blkinfo { typedef struct xfs_da_intnode { struct xfs_da_node_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __uint16_t count; /* count of active entries */ - __uint16_t level; /* level above leaves (leaf == 0) */ + __be16 count; /* count of active entries */ + __be16 level; /* level above leaves (leaf == 0) */ } hdr; struct xfs_da_node_entry { - xfs_dahash_t hashval; /* hash value for this descendant */ - xfs_dablk_t before; /* Btree block before this key */ + __be32 hashval; /* hash value for this descendant */ + __be32 before; /* Btree block before this key */ } btree[1]; /* variable sized array of keys */ } xfs_da_intnode_t; typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index c6191d00ad27..4968a6358e61 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -83,7 +83,7 @@ xfs_swapext( /* Pull information for the target fd */ if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || - ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { + ((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } @@ -95,7 +95,7 @@ xfs_swapext( } if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || - ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { + ((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c index bb87d2a700a9..9cc702a839a3 100644 --- a/fs/xfs/xfs_dir.c +++ b/fs/xfs/xfs_dir.c @@ -634,7 +634,7 @@ xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen) return(retval); ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); retval = xfs_dir_leaf_lookup_int(bp, args, &index); if (retval == EEXIST) { (void)xfs_dir_leaf_remove(args->trans, bp, index); @@ -912,7 +912,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, return(error); if (bp) leaf = bp->data; - if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { xfs_dir_trace_g_dub("node: block not a leaf", dp, uio, bno); xfs_da_brelse(trans, bp); @@ -949,17 +949,17 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, if (bp == NULL) return(XFS_ERROR(EFSCORRUPTED)); node = bp->data; - if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) + if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC) break; btree = &node->btree[0]; xfs_dir_trace_g_dun("node: node detail", dp, uio, node); - for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { - if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { - bno = INT_GET(btree->before, ARCH_CONVERT); + for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) { + if (be32_to_cpu(btree->hashval) >= cookhash) { + bno = be32_to_cpu(btree->before); break; } } - if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(node->hdr.count)) { xfs_da_brelse(trans, bp); xfs_dir_trace_g_du("node: hash beyond EOF", dp, uio); @@ -982,7 +982,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, */ for (;;) { leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) { xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); xfs_da_brelse(trans, bp); XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", @@ -990,7 +990,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, 
xfs_inode_t *dp, uio_t *uio, return XFS_ERROR(EFSCORRUPTED); } xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); - if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { + if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) { nextda = xfs_da_reada_buf(trans, dp, nextbno, XFS_DATA_FORK); } else @@ -1118,21 +1118,20 @@ void xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, xfs_da_intnode_t *node) { - int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1; + int last = be16_to_cpu(node->hdr.count) - 1; xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, (void *)dp, (void *)dp->i_mount, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, + (void *)(unsigned long)be32_to_cpu(node->hdr.info.forw), (void *)(unsigned long) - INT_GET(node->hdr.info.forw, ARCH_CONVERT), + be16_to_cpu(node->hdr.count), (void *)(unsigned long) - INT_GET(node->hdr.count, ARCH_CONVERT), + be32_to_cpu(node->btree[0].hashval), (void *)(unsigned long) - INT_GET(node->btree[0].hashval, ARCH_CONVERT), - (void *)(unsigned long) - INT_GET(node->btree[last].hashval, ARCH_CONVERT), + be32_to_cpu(node->btree[last].hashval), NULL, NULL, NULL); } @@ -1150,8 +1149,7 @@ xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, - (void *)(unsigned long) - INT_GET(leaf->hdr.info.forw, ARCH_CONVERT), + (void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw), (void *)(unsigned long) INT_GET(leaf->hdr.count, ARCH_CONVERT), (void *)(unsigned long) diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h index 3158f5dc431f..7dd364b1e038 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/xfs_dir2.h @@ -55,16 +55,16 @@ typedef __uint32_t xfs_dir2_db_t; /* * Byte offset in a directory. */ -typedef xfs_off_t xfs_dir2_off_t; +typedef xfs_off_t xfs_dir2_off_t; /* * For getdents, argument struct for put routines. */ typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); typedef struct xfs_dir2_put_args { - xfs_off_t cook; /* cookie of (next) entry */ + xfs_off_t cook; /* cookie of (next) entry */ xfs_intino_t ino; /* inode number */ - struct xfs_dirent *dbp; /* buffer pointer */ + xfs_dirent_t *dbp; /* buffer pointer */ char *name; /* directory entry name */ int namelen; /* length of name */ int done; /* output: set if value was stored */ @@ -75,18 +75,13 @@ typedef struct xfs_dir2_put_args { /* * Other interfaces used by the rest of the dir v2 code. 
*/ -extern int - xfs_dir2_grow_inode(struct xfs_da_args *args, int space, - xfs_dir2_db_t *dbp); - -extern int - xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); - -extern int - xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); - -extern int - xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, - struct xfs_dabuf *bp); +extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); +extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, + int *vp); +extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, + int *vp); +extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); #endif /* __XFS_DIR2_H__ */ diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 31bc99faa704..bd5cee6aa51a 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -81,7 +81,7 @@ xfs_dir2_block_addname( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log header */ int needscan; /* need to rescan freespace */ - xfs_dir2_data_off_t *tagp; /* pointer to tag value */ + __be16 *tagp; /* pointer to tag value */ xfs_trans_t *tp; /* transaction structure */ xfs_dir2_trace_args("block_addname", args); @@ -100,8 +100,7 @@ xfs_dir2_block_addname( /* * Check the magic number, corrupted if wrong. */ - if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) - != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", XFS_ERRLEVEL_LOW, mp, block); xfs_da_brelse(tp, bp); @@ -121,38 +120,38 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (xfs_dir2_data_off_t *)blp - 1; + tagp = (__be16 *)blp - 1; /* * Data object just before the first leaf entry. */ - enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free then can't do this add without cleaning up: * the space before the first leaf entry needs to be free so it * can be expanded to hold the pointer to the new entry. */ - if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG) dup = enddup = NULL; /* * Check out the biggest freespace and see if it's the same one. */ else { dup = (xfs_dir2_data_unused_t *) - ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + ((char *)block + be16_to_cpu(bf[0].offset)); if (dup == enddup) { /* * It is the biggest freespace, is it too small * to hold the new leaf too? */ - if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { + if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) { /* * Yes, we use the second-largest * entry instead if it works. */ - if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) + if (be16_to_cpu(bf[1].length) >= len) dup = (xfs_dir2_data_unused_t *) ((char *)block + - INT_GET(bf[1].offset, ARCH_CONVERT)); + be16_to_cpu(bf[1].offset)); else dup = NULL; } @@ -161,7 +160,7 @@ xfs_dir2_block_addname( * Not the same free entry, * just check its length. */ - if (INT_GET(dup->length, ARCH_CONVERT) < len) { + if (be16_to_cpu(dup->length) < len) { dup = NULL; } } @@ -172,9 +171,9 @@ xfs_dir2_block_addname( * If there are stale entries we'll use one for the leaf. * Is the biggest entry enough to avoid compaction? 
*/ - else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { + else if (be16_to_cpu(bf[0].length) >= len) { dup = (xfs_dir2_data_unused_t *) - ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + ((char *)block + be16_to_cpu(bf[0].offset)); compact = 0; } /* @@ -184,20 +183,20 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (xfs_dir2_data_off_t *)blp - 1; + tagp = (__be16 *)blp - 1; /* * Data object just before the first leaf entry. */ - dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free then the data will go where the * leaf data starts now, if it works at all. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) dup = NULL; - } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) + } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) dup = NULL; else dup = (xfs_dir2_data_unused_t *)blp; @@ -243,11 +242,11 @@ xfs_dir2_block_addname( int fromidx; /* source leaf index */ int toidx; /* target leaf index */ - for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, + for (fromidx = toidx = be32_to_cpu(btp->count) - 1, highstale = lfloghigh = -1; fromidx >= 0; fromidx--) { - if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) { if (highstale == -1) highstale = toidx; else { @@ -260,15 +259,15 @@ xfs_dir2_block_addname( blp[toidx] = blp[fromidx]; toidx--; } - lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); - lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; - INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); + lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); + lfloghigh -= be32_to_cpu(btp->stale) - 1; + be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1)); xfs_dir2_data_make_free(tp, bp, (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), - (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), + (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), &needlog, &needscan); - blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; - INT_SET(btp->stale, ARCH_CONVERT, 1); + blp += be32_to_cpu(btp->stale) - 1; + btp->stale = cpu_to_be32(1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -283,23 +282,23 @@ xfs_dir2_block_addname( * Set leaf logging boundaries to impossible state. * For the no-stale case they're set explicitly. */ - else if (INT_GET(btp->stale, ARCH_CONVERT)) { - lfloglow = INT_GET(btp->count, ARCH_CONVERT); + else if (btp->stale) { + lfloglow = be32_to_cpu(btp->count); lfloghigh = -1; } /* * Find the slot that's first lower than our hash value, -1 if none. 
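The lookup below is the stock two-phase pattern: binary-search to any entry carrying the target hash, then step backwards so that mid + 1 is the first slot at or above it, which is where the new leaf entry belongs. A standalone sketch of that shape, using the __be32 leaf-entry fields from this patch (the function name is hypothetical):

	static int example_find_insertion_slot(xfs_dir2_leaf_entry_t *blp,
					       int count, xfs_dahash_t want)
	{
		int low = 0, high = count - 1, mid = 0;

		if (!count)
			return 0;
		while (low <= high) {
			mid = (low + high) >> 1;
			if (be32_to_cpu(blp[mid].hashval) == want)
				break;
			if (be32_to_cpu(blp[mid].hashval) < want)
				low = mid + 1;
			else
				high = mid - 1;
		}
		/* step back over duplicates: mid + 1 is the first slot
		 * whose hash is >= want */
		while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= want)
			mid--;
		return mid + 1;
	}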
*/ - for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { + for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) { mid = (low + high) >> 1; - if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; else high = mid - 1; } - while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { + while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) { mid--; } /* @@ -311,14 +310,14 @@ xfs_dir2_block_addname( */ xfs_dir2_data_use_free(tp, bp, enddup, (xfs_dir2_data_aoff_t) - ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - + ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) - sizeof(*blp)), (xfs_dir2_data_aoff_t)sizeof(*blp), &needlog, &needscan); /* * Update the tail (entry count). */ - INT_MOD(btp->count, ARCH_CONVERT, +1); + be32_add(&btp->count, 1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -346,12 +345,12 @@ xfs_dir2_block_addname( else { for (lowstale = mid; lowstale >= 0 && - INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; for (highstale = mid + 1; - highstale < INT_GET(btp->count, ARCH_CONVERT) && - INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + highstale < be32_to_cpu(btp->count) && + be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || mid - lowstale > highstale - mid); highstale++) continue; @@ -359,7 +358,7 @@ xfs_dir2_block_addname( * Move entries toward the low-numbered stale entry. */ if (lowstale >= 0 && - (highstale == INT_GET(btp->count, ARCH_CONVERT) || + (highstale == be32_to_cpu(btp->count) || mid - lowstale <= highstale - mid)) { if (mid - lowstale) memmove(&blp[lowstale], &blp[lowstale + 1], @@ -371,7 +370,7 @@ xfs_dir2_block_addname( * Move entries toward the high-numbered stale entry. */ else { - ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); + ASSERT(highstale < be32_to_cpu(btp->count)); mid++; if (highstale - mid) memmove(&blp[mid + 1], &blp[mid], @@ -379,7 +378,7 @@ xfs_dir2_block_addname( lfloglow = MIN(mid, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(btp->stale, ARCH_CONVERT, -1); + be32_add(&btp->stale, -1); } /* * Point to the new data entry. @@ -388,8 +387,9 @@ xfs_dir2_block_addname( /* * Fill in the leaf entry. */ - INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); - INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[mid].hashval = cpu_to_be32(args->hashval); + blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); /* * Mark space for the data entry used. @@ -404,7 +404,7 @@ xfs_dir2_block_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, args->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); /* * Clean up the bestfree array and log the header, tail, and entry. */ @@ -485,8 +485,8 @@ xfs_dir2_block_getdents( /* * Unused, skip it. 
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - ptr += INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + ptr += be16_to_cpu(dup->length); continue; } @@ -622,7 +622,7 @@ xfs_dir2_block_lookup( * Get the offset from the leaf entry, to point to the data. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); /* * Fill in inode number, release the block. */ @@ -674,10 +674,10 @@ xfs_dir2_block_lookup_int( * Loop doing a binary search for our hash value. * Find our entry, ENOENT if it's not there. */ - for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { + for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) { ASSERT(low <= high); mid = (low + high) >> 1; - if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; @@ -692,7 +692,7 @@ xfs_dir2_block_lookup_int( /* * Back up to the first one with the right hash value. */ - while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { + while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) { mid--; } /* @@ -700,7 +700,7 @@ xfs_dir2_block_lookup_int( * right hash value looking for our name. */ do { - if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get pointer to the entry from the leaf. @@ -717,7 +717,7 @@ xfs_dir2_block_lookup_int( *entno = mid; return 0; } - } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); + } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash); /* * No match, release the buffer and return ENOENT. */ @@ -767,7 +767,7 @@ xfs_dir2_block_removename( * Point to the data entry using the leaf entry. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); /* * Mark the data entry's space free. */ @@ -778,12 +778,12 @@ xfs_dir2_block_removename( /* * Fix up the block tail. */ - INT_MOD(btp->stale, ARCH_CONVERT, +1); + be32_add(&btp->stale, 1); xfs_dir2_block_log_tail(tp, bp); /* * Remove the leaf entry by marking it stale. */ - INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_block_log_leaf(tp, bp, ent, ent); /* * Fix up bestfree, log the header if necessary. @@ -843,7 +843,7 @@ xfs_dir2_block_replace( * Point to the data entry we need to change. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); /* * Change the inode number to the new value. @@ -868,8 +868,8 @@ xfs_dir2_block_sort( la = a; lb = b; - return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : - (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0); + return be32_to_cpu(la->hashval) < be32_to_cpu(lb->hashval) ? -1 : + (be32_to_cpu(la->hashval) > be32_to_cpu(lb->hashval) ? 
1 : 0); } /* @@ -881,7 +881,7 @@ xfs_dir2_leaf_to_block( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dabuf_t *dbp) /* data buffer */ { - xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + __be16 *bestsp; /* leaf bests table */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_block_tail_t *btp; /* block tail */ xfs_inode_t *dp; /* incore directory inode */ @@ -896,7 +896,7 @@ xfs_dir2_leaf_to_block( int needscan; /* need to scan for bestfree */ xfs_dir2_sf_hdr_t sfh; /* shortform header */ int size; /* bytes used */ - xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ + __be16 *tagp; /* end of entry (tag) */ int to; /* block/leaf to index */ xfs_trans_t *tp; /* transaction pointer */ @@ -905,7 +905,7 @@ xfs_dir2_leaf_to_block( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * If there are data blocks other than the first one, take this @@ -915,11 +915,11 @@ xfs_dir2_leaf_to_block( */ while (dp->i_d.di_size > mp->m_dirblksize) { bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == + if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == mp->m_dirblksize - (uint)sizeof(block->hdr)) { if ((error = xfs_dir2_leaf_trim_data(args, lbp, - (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) + (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1)))) goto out; } else { error = 0; @@ -935,28 +935,29 @@ xfs_dir2_leaf_to_block( goto out; } block = dbp->data; - ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC); /* * Size of the "leaf" area in the block. */ size = (uint)sizeof(block->tail) + - (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); /* * Look at the last data entry. */ - tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; - dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free or is too short we can't do it. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { + if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG || + be16_to_cpu(dup->length) < size) { error = 0; goto out; } /* * Start converting it to block form. */ - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); needlog = 1; needscan = 0; /* @@ -968,20 +969,20 @@ xfs_dir2_leaf_to_block( * Initialize the block tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); btp->stale = 0; xfs_dir2_block_log_tail(tp, dbp); /* * Initialize the block leaf area. We compact out stale entries. 
*/ lep = XFS_DIR2_BLOCK_LEAF_P(btp); - for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { - if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { + if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) continue; lep[to++] = leaf->ents[from]; } - ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); - xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + ASSERT(to == be32_to_cpu(btp->count)); + xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1); /* * Scan the bestfree if we need it and log the data block header. */ @@ -1043,7 +1044,7 @@ xfs_dir2_sf_to_block( int offset; /* target block offset */ xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ xfs_dir2_sf_t *sfp; /* shortform structure */ - xfs_dir2_data_off_t *tagp; /* end of data entry */ + __be16 *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args("sf_to_block", args); @@ -1095,12 +1096,12 @@ xfs_dir2_sf_to_block( return error; } block = bp->data; - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); /* * Compute size of block "tail" area. */ i = (uint)sizeof(*btp) + - (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); + (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); /* * The whole thing is initialized to free by the init routine. * Say we're using the leaf and tail area. @@ -1114,7 +1115,7 @@ xfs_dir2_sf_to_block( * Fill in the tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2); /* ., .. */ + btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ btp->stale = 0; blp = XFS_DIR2_BLOCK_LEAF_P(btp); endoffset = (uint)((char *)blp - (char *)block); @@ -1123,7 +1124,7 @@ xfs_dir2_sf_to_block( */ xfs_dir2_data_use_free(tp, bp, dup, (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), - INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan); + be16_to_cpu(dup->length), &needlog, &needscan); /* * Create entry for . */ @@ -1133,10 +1134,11 @@ xfs_dir2_sf_to_block( dep->namelen = 1; dep->name[0] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot); - INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); + blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); /* * Create entry for .. */ @@ -1146,15 +1148,16 @@ xfs_dir2_sf_to_block( dep->namelen = 2; dep->name[0] = dep->name[1] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot); - INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); + blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); offset = XFS_DIR2_DATA_FIRST_OFFSET; /* * Loop over existing entries, stuff them in. 
*/ - if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + if ((i = 0) == sfp->hdr.count) sfep = NULL; else sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); @@ -1176,15 +1179,14 @@ xfs_dir2_sf_to_block( if (offset < newoffset) { dup = (xfs_dir2_data_unused_t *) ((char *)block + offset); - INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, - (xfs_dir2_data_off_t) + dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + dup->length = cpu_to_be16(newoffset - offset); + *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16( ((char *)dup - (char *)block)); xfs_dir2_data_log_unused(tp, bp, dup); (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, dup, &dummy); - offset += INT_GET(dup->length, ARCH_CONVERT); + offset += be16_to_cpu(dup->length); continue; } /* @@ -1196,13 +1198,14 @@ xfs_dir2_sf_to_block( dep->namelen = sfep->namelen; memcpy(dep->name, sfep->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); - INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, + blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( + (char *)sfep->name, sfep->namelen)); + blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); offset = (int)((char *)(tagp + 1) - (char *)block); - if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + if (++i == sfp->hdr.count) sfep = NULL; else sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); @@ -1212,13 +1215,13 @@ xfs_dir2_sf_to_block( /* * Sort the leaf entries by hash value. */ - xfs_sort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); + xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort); /* * Log the leaf entry area and tail. * Already logged the header in data_init, ignore needlog. 
*/ ASSERT(needscan == 0); - xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); xfs_dir2_block_log_tail(tp, bp); xfs_dir2_data_check(dp, bp); xfs_da_buf_done(bp); diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h index a2e5cb98a838..6722effd0b20 100644 --- a/fs/xfs/xfs_dir2_block.h +++ b/fs/xfs/xfs_dir2_block.h @@ -43,8 +43,8 @@ struct xfs_trans; #define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ typedef struct xfs_dir2_block_tail { - __uint32_t count; /* count of leaf entries */ - __uint32_t stale; /* count of stale lf entries */ + __be32 count; /* count of leaf entries */ + __be32 stale; /* count of stale lf entries */ } xfs_dir2_block_tail_t; /* @@ -75,8 +75,7 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) static inline struct xfs_dir2_leaf_entry * xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) { - return (((struct xfs_dir2_leaf_entry *) - (btp)) - INT_GET((btp)->count, ARCH_CONVERT)); + return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count); } /* diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 5b7c47e2f14a..bb3d03ff002b 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -70,11 +70,11 @@ xfs_dir2_data_check( mp = dp->i_mount; d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); bf = d->hdr.bestfree; p = (char *)d->u; - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); lep = XFS_DIR2_BLOCK_LEAF_P(btp); endp = (char *)lep; @@ -96,8 +96,8 @@ xfs_dir2_data_check( ASSERT(!bf[2].offset); freeseen |= 1 << 2; } - ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); - ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length)); + ASSERT(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length)); /* * Loop over the data/unused entries. */ @@ -108,18 +108,20 @@ xfs_dir2_data_check( * If we find it, account for that, else make sure it * doesn't need to be there. 
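For orientation, the verification loop here relies on the data block being self-describing: at every offset there is either a live entry or an unused span, distinguished by the XFS_DIR2_DATA_FREE_TAG value in the leading 16 bits. A bare-bones walk in the same shape, stripped of all checking (hypothetical function; endp marks the end of the walkable area):

	static void example_walk(xfs_dir2_data_t *d, char *endp)
	{
		char *p = (char *)d->u;

		while (p < endp) {
			xfs_dir2_data_unused_t *dup =
				(xfs_dir2_data_unused_t *)p;

			if (be16_to_cpu(dup->freetag) ==
			    XFS_DIR2_DATA_FREE_TAG) {
				/* free span: length is stored in the span */
				p += be16_to_cpu(dup->length);
			} else {
				/* live entry: size follows from namelen */
				xfs_dir2_data_entry_t *dep =
					(xfs_dir2_data_entry_t *)p;
				p += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
			}
		}
	}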
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT(lastfree == 0); - ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT) == + ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) == (char *)dup - (char *)d); dfp = xfs_dir2_data_freefind(d, dup); if (dfp) { i = (int)(dfp - bf); ASSERT((freeseen & (1 << i)) == 0); freeseen |= 1 << i; - } else - ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); - p += INT_GET(dup->length, ARCH_CONVERT); + } else { + ASSERT(be16_to_cpu(dup->length) <= + be16_to_cpu(bf[2].length)); + } + p += be16_to_cpu(dup->length); lastfree = 1; continue; } @@ -132,21 +134,21 @@ xfs_dir2_data_check( dep = (xfs_dir2_data_entry_t *)p; ASSERT(dep->namelen != 0); ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); - ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == + ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) == (char *)dep - (char *)d); count++; lastfree = 0; - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)d)); hash = xfs_da_hashname((char *)dep->name, dep->namelen); - for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && - INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) + for (i = 0; i < be32_to_cpu(btp->count); i++) { + if (be32_to_cpu(lep[i].address) == addr && + be32_to_cpu(lep[i].hashval) == hash) break; } - ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); + ASSERT(i < be32_to_cpu(btp->count)); } p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } @@ -154,15 +156,15 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { - for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { + if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; if (i > 0) - ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); + ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); } - ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); - ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); + ASSERT(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); + ASSERT(stale == be32_to_cpu(btp->stale)); } } #endif @@ -190,8 +192,8 @@ xfs_dir2_data_freefind( * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { @@ -201,23 +203,24 @@ xfs_dir2_data_freefind( continue; } ASSERT(seenzero == 0); - if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { + if (be16_to_cpu(dfp->offset) == off) { matched = 1; - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); - } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) - ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); + ASSERT(dfp->length == dup->length); + } else if (off < be16_to_cpu(dfp->offset)) + ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset)); else - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); - ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off); + ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length)); if (dfp > &d->hdr.bestfree[0]) - ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length)); } #endif /* * If this is smaller than the smallest bestfree entry, * it can't be there since they're sorted. */ - if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) + if (be16_to_cpu(dup->length) < + be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length)) return NULL; /* * Look at the three bestfree entries for our guy. @@ -227,7 +230,7 @@ xfs_dir2_data_freefind( dfp++) { if (!dfp->offset) return NULL; - if (INT_GET(dfp->offset, ARCH_CONVERT) == off) + if (be16_to_cpu(dfp->offset) == off) return dfp; } /* @@ -249,29 +252,29 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif dfp = d->hdr.bestfree; - INT_COPY(new.length, dup->length, ARCH_CONVERT); - INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + new.length = dup->length; + new.offset = cpu_to_be16((char *)dup - (char *)d); /* * Insert at position 0, 1, or 2; or not at all. 
*/ - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) { dfp[2] = dfp[1]; dfp[1] = dfp[0]; dfp[0] = new; *loghead = 1; return &dfp[0]; } - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) { dfp[2] = dfp[1]; dfp[1] = new; *loghead = 1; return &dfp[1]; } - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) { dfp[2] = new; *loghead = 1; return &dfp[2]; @@ -289,8 +292,8 @@ xfs_dir2_data_freeremove( int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * It's the first entry, slide the next 2 up. @@ -334,8 +337,8 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * Start by clearing the table. @@ -348,7 +351,7 @@ xfs_dir2_data_freescan( p = (char *)d->u; if (aendp) endp = aendp; - else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + else if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } else @@ -361,11 +364,11 @@ xfs_dir2_data_freescan( /* * If it's a free entry, insert it. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT((char *)dup - (char *)d == - INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); + be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); xfs_dir2_data_freeinsert(d, dup, loghead); - p += INT_GET(dup->length, ARCH_CONVERT); + p += be16_to_cpu(dup->length); } /* * For active entries, check their tags and skip them. @@ -373,7 +376,7 @@ xfs_dir2_data_freescan( else { dep = (xfs_dir2_data_entry_t *)p; ASSERT((char *)dep - (char *)d == - INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); + be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep))); p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } } @@ -415,8 +418,8 @@ xfs_dir2_data_init( * Initialize the header. */ d = bp->data; - INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); - INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); + d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); + d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr)); for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { d->hdr.bestfree[i].length = 0; d->hdr.bestfree[i].offset = 0; @@ -425,13 +428,12 @@ xfs_dir2_data_init( * Set up an unused entry for the block's body. 
*/ dup = &d->u[0].unused; - INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); t=mp->m_dirblksize - (uint)sizeof(d->hdr); - INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); - INT_SET(dup->length, ARCH_CONVERT, t); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + d->hdr.bestfree[0].length = cpu_to_be16(t); + dup->length = cpu_to_be16(t); + *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d); /* * Log it and return it. */ @@ -453,8 +455,8 @@ xfs_dir2_data_log_entry( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - (char *)d - 1)); @@ -471,8 +473,8 @@ xfs_dir2_data_log_header( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), (uint)(sizeof(d->hdr) - 1)); } @@ -489,8 +491,8 @@ xfs_dir2_data_log_unused( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); /* * Log the first part of the unused entry. */ @@ -533,12 +535,12 @@ xfs_dir2_data_make_free( /* * Figure out where the end of the data area is. */ - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC) endptr = (char *)d + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } @@ -547,11 +549,11 @@ xfs_dir2_data_make_free( * the previous entry and see if it's free. */ if (offset > sizeof(d->hdr)) { - xfs_dir2_data_off_t *tagp; /* tag just before us */ + __be16 *tagp; /* tag just before us */ - tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; - prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); - if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + tagp = (__be16 *)((char *)d + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp)); + if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG) prevdup = NULL; } else prevdup = NULL; @@ -562,7 +564,7 @@ xfs_dir2_data_make_free( if ((char *)d + offset + len < endptr) { postdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG) postdup = NULL; } else postdup = NULL; @@ -586,13 +588,13 @@ xfs_dir2_data_make_free( * since the third bestfree is there, there might be more * entries. 
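One detail worth calling out in the next hunk: needscan = d->hdr.bestfree[2].length becomes an explicit != 0 test. Beyond style, it highlights that a zero/nonzero test is the one comparison that is safe on a raw big-endian field, since zero is zero in any byte order; ordered comparisons still need be16_to_cpu(). An illustrative sketch, mirroring the converted line:

	static int example_needscan(xfs_dir2_data_hdr_t *hdr)
	{
		/* zero is zero in either byte order: no conversion needed
		 * for a truth test, unlike the ordered compares elsewhere */
		return hdr->bestfree[2].length != 0;
	}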
*/ - needscan = d->hdr.bestfree[2].length; + needscan = (d->hdr.bestfree[2].length != 0); /* * Fix up the new big freespace. */ - INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = + cpu_to_be16((char *)prevdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, prevdup); if (!needscan) { /* @@ -614,7 +616,7 @@ xfs_dir2_data_make_free( */ dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); ASSERT(dfp == &d->hdr.bestfree[0]); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); + ASSERT(dfp->length == prevdup->length); ASSERT(!dfp[1].length); ASSERT(!dfp[2].length); } @@ -624,9 +626,9 @@ xfs_dir2_data_make_free( */ else if (prevdup) { dfp = xfs_dir2_data_freefind(d, prevdup); - INT_MOD(prevdup->length, ARCH_CONVERT, len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + be16_add(&prevdup->length, len); + *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = + cpu_to_be16((char *)prevdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, prevdup); /* * If the previous entry was in the table, the new entry @@ -640,8 +642,10 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else - needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + else { + needscan = be16_to_cpu(prevdup->length) > + be16_to_cpu(d->hdr.bestfree[2].length); + } } /* * The following entry is free, merge with it. @@ -649,10 +653,10 @@ xfs_dir2_data_make_free( else if (postdup) { dfp = xfs_dir2_data_freefind(d, postdup); newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If the following entry was in the table, the new entry @@ -666,18 +670,20 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else - needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + else { + needscan = be16_to_cpu(newdup->length) > + be16_to_cpu(d->hdr.bestfree[2].length); + } } /* * Neither neighbor is free. Make a new entry. 
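Taken together, the branches of this function implement classic free-span coalescing: merge with both neighbors, with just the previous span, with just the following span, or create a fresh entry. A condensed sketch of the length bookkeeping alone, omitting the tag, logging, and bestfree updates the real code performs (hypothetical function; be16_add is the helper this series introduces):

	static void example_coalesce(xfs_dir2_data_unused_t *prevdup,
				     xfs_dir2_data_unused_t *postdup,
				     xfs_dir2_data_unused_t *newdup, int len)
	{
		if (prevdup && postdup) {
			/* both neighbors free: grow prevdup over the hole
			 * and the following span */
			be16_add(&prevdup->length,
				 len + be16_to_cpu(postdup->length));
		} else if (prevdup) {
			/* previous span free: extend it over the hole */
			be16_add(&prevdup->length, len);
		} else if (postdup) {
			/* following span free: new span absorbs it */
			newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			newdup->length = cpu_to_be16(len +
					be16_to_cpu(postdup->length));
		} else {
			/* isolated hole: brand-new unused entry */
			newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			newdup->length = cpu_to_be16(len);
		}
	}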
*/ else { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(len); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); } @@ -707,18 +713,18 @@ xfs_dir2_data_use_free( int oldlen; /* old unused entry's length */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); - ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)d); - ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); - ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); + ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); + ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); /* * Look up the entry in the bestfree table. */ dfp = xfs_dir2_data_freefind(d, dup); - oldlen = INT_GET(dup->length, ARCH_CONVERT); - ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); + oldlen = be16_to_cpu(dup->length); + ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length)); /* * Check for alignment with front and back of the entry. */ @@ -732,7 +738,7 @@ xfs_dir2_data_use_free( */ if (matchfront && matchback) { if (dfp) { - needscan = d->hdr.bestfree[2].offset; + needscan = (d->hdr.bestfree[2].offset != 0); if (!needscan) xfs_dir2_data_freeremove(d, dfp, needlogp); } @@ -743,10 +749,10 @@ xfs_dir2_data_use_free( */ else if (matchfront) { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(oldlen - len); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. 
@@ -755,8 +761,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + ASSERT(dfp->length == newdup->length); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -771,10 +777,9 @@ xfs_dir2_data_use_free( */ else if (matchback) { newdup = dup; - INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) - (((char *)d + offset) - (char *)newdup)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. @@ -783,8 +788,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + ASSERT(dfp->length == newdup->length); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -799,16 +804,15 @@ xfs_dir2_data_use_free( */ else { newdup = dup; - INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) - (((char *)d + offset) - (char *)newdup)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup2), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); + newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) = + cpu_to_be16((char *)newdup2 - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup2); /* * If the old entry was in the table, we need to scan @@ -819,7 +823,7 @@ xfs_dir2_data_use_free( * the 2 new will work. */ if (dfp) { - needscan = d->hdr.bestfree[2].length; + needscan = (d->hdr.bestfree[2].length != 0); if (!needscan) { xfs_dir2_data_freeremove(d, dfp, needlogp); (void)xfs_dir2_data_freeinsert(d, newdup, diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h index 5e3a7f9ec735..0847cbb53e17 100644 --- a/fs/xfs/xfs_dir2_data.h +++ b/fs/xfs/xfs_dir2_data.h @@ -65,8 +65,8 @@ struct xfs_trans; * The freespace will be formatted as a xfs_dir2_data_unused_t. 
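Before the structure definitions that follow: both live entries and unused spans end in a tag, the last __be16 of the object, holding the offset of its own start within the block; that is what the *_TAG_P helpers below compute and what the ASSERTs above check. A sketch of the round-trip (hypothetical function):

	static int example_tag_is_consistent(xfs_dir2_data_t *d,
					     xfs_dir2_data_unused_t *dup)
	{
		/* the tag is the last __be16 of the span */
		__be16 *tagp = (__be16 *)((char *)dup +
				be16_to_cpu(dup->length) - sizeof(__be16));

		/* it must point back at the span's own block offset */
		return be16_to_cpu(*tagp) == (char *)dup - (char *)d;
	}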
*/ typedef struct xfs_dir2_data_free { - xfs_dir2_data_off_t offset; /* start of freespace */ - xfs_dir2_data_off_t length; /* length of freespace */ + __be16 offset; /* start of freespace */ + __be16 length; /* length of freespace */ } xfs_dir2_data_free_t; /* @@ -75,7 +75,7 @@ typedef struct xfs_dir2_data_free { * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. */ typedef struct xfs_dir2_data_hdr { - __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ + __be32 magic; /* XFS_DIR2_DATA_MAGIC */ /* or XFS_DIR2_BLOCK_MAGIC */ xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; } xfs_dir2_data_hdr_t; @@ -97,10 +97,10 @@ typedef struct xfs_dir2_data_entry { * Tag appears as the last 2 bytes. */ typedef struct xfs_dir2_data_unused { - __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ - xfs_dir2_data_off_t length; /* total free length */ + __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */ + __be16 length; /* total free length */ /* variable offset */ - xfs_dir2_data_off_t tag; /* starting offset of us */ + __be16 tag; /* starting offset of us */ } xfs_dir2_data_unused_t; typedef union { @@ -134,12 +134,11 @@ static inline int xfs_dir2_data_entsize(int n) * Pointer to an entry's tag word. */ #define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) { - return (xfs_dir2_data_off_t *) \ - ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ - (uint)sizeof(xfs_dir2_data_off_t)); + return (__be16 *)((char *)dep + + XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16)); } /* @@ -147,12 +146,11 @@ xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) */ #define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \ xfs_dir2_data_unused_tag_p(dup) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) { - return (xfs_dir2_data_off_t *) \ - ((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \ - - (uint)sizeof(xfs_dir2_data_off_t)); + return (__be16 *)((char *)dup + + be16_to_cpu(dup->length) - sizeof(__be16)); } /* diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index d342b6b55239..08648b18265c 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -66,7 +66,7 @@ xfs_dir2_block_to_leaf( xfs_da_args_t *args, /* operation arguments */ xfs_dabuf_t *dbp) /* input block's buffer */ { - xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ + __be16 *bestsp; /* leaf's bestsp entries */ xfs_dablk_t blkno; /* leaf block's bno */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ @@ -111,14 +111,14 @@ xfs_dir2_block_to_leaf( /* * Set the counts in the leaf header. */ - INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ - INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ + leaf->hdr.count = cpu_to_be16(be32_to_cpu(btp->count)); + leaf->hdr.stale = cpu_to_be16(be32_to_cpu(btp->stale)); /* * Could compact these but I think we always do the conversion * after squeezing out stale entries. 
*/ - memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); - xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); + memcpy(leaf->ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, lbp, 0, be16_to_cpu(leaf->hdr.count) - 1); needscan = 0; needlog = 1; /* @@ -133,7 +133,7 @@ xfs_dir2_block_to_leaf( /* * Fix up the block header, make it a data block. */ - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); if (needscan) xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, NULL); @@ -141,9 +141,9 @@ xfs_dir2_block_to_leaf( * Set up leaf tail and bests table. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - INT_SET(ltp->bestcount, ARCH_CONVERT, 1); + ltp->bestcount = cpu_to_be32(1); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); + bestsp[0] = block->hdr.bestfree[0].length; /* * Log the data header and leaf bests table. */ @@ -163,7 +163,7 @@ int /* error */ xfs_dir2_leaf_addname( xfs_da_args_t *args) /* operation arguments */ { - xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ + __be16 *bestsp; /* freespace table in leaf */ int compact; /* need to compact leaves */ xfs_dir2_data_t *data; /* data block structure */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -187,7 +187,7 @@ xfs_dir2_leaf_addname( int needbytes; /* leaf block bytes needed */ int needlog; /* need to log data header */ int needscan; /* need to rescan data free */ - xfs_dir2_data_off_t *tagp; /* end of data entry */ + __be16 *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_db_t use_block; /* data block number */ @@ -222,14 +222,14 @@ xfs_dir2_leaf_addname( * in a data block, improving the lookup of those entries. */ for (use_block = -1, lep = &leaf->ents[index]; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; index++, lep++) { - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; - i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); - ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); - ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); - if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + ASSERT(i < be32_to_cpu(ltp->bestcount)); + ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); + if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; } @@ -238,13 +238,13 @@ xfs_dir2_leaf_addname( * Didn't find a block yet, linear search all the data blocks. */ if (use_block == -1) { - for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { + for (i = 0; i < be32_to_cpu(ltp->bestcount); i++) { /* * Remember a block we see that's missing. */ - if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) + if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1) use_block = i; - else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + else if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; } @@ -260,21 +260,21 @@ xfs_dir2_leaf_addname( * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
*/ - if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) + if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF) use_block = -1; /* * If we don't have enough free bytes but we can make enough * by compacting out stale entries, we'll do that. */ - if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && - INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { + if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes && + be16_to_cpu(leaf->hdr.stale) > 1) { compact = 1; } /* * Otherwise if we don't have enough free bytes we need to * convert to node form. */ - else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < + else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes) { /* * Just checking or no space reservation, give up. @@ -330,8 +330,8 @@ xfs_dir2_leaf_addname( * There are stale entries, so we'll need log-low and log-high * impossibly bad values later. */ - else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { - lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + else if (be16_to_cpu(leaf->hdr.stale)) { + lfloglow = be16_to_cpu(leaf->hdr.count); lfloghigh = -1; } /* @@ -358,13 +358,13 @@ xfs_dir2_leaf_addname( * If we're adding a new data block on the end we need to * extend the bests table. Copy it up one entry. */ - if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { + if (use_block >= be32_to_cpu(ltp->bestcount)) { bestsp--; memmove(&bestsp[0], &bestsp[1], - INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); - INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); + be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); + be32_add(&ltp->bestcount, 1); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); } /* * If we're filling in a previously empty block just log it. @@ -372,7 +372,7 @@ xfs_dir2_leaf_addname( else xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); data = dbp->data; - INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + bestsp[use_block] = data->hdr.bestfree[0].length; grown = 1; } /* @@ -394,8 +394,8 @@ xfs_dir2_leaf_addname( * Point to the biggest freespace in our data block. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); - ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); + ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); + ASSERT(be16_to_cpu(dup->length) >= length); needscan = needlog = 0; /* * Mark the initial part of our freespace in use for the new entry. @@ -411,7 +411,7 @@ xfs_dir2_leaf_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + *tagp = cpu_to_be16((char *)dep - (char *)data); /* * Need to scan fix up the bestfree table. */ @@ -427,8 +427,8 @@ xfs_dir2_leaf_addname( * If the bests table needs to be changed, do it. * Log the change unless we've already done that. 
*/ - if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { - INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) { + bestsp[use_block] = data->hdr.bestfree[0].length; if (!grown) xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); } @@ -440,15 +440,15 @@ xfs_dir2_leaf_addname( /* * lep is still good as the index leaf entry. */ - if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + if (index < be16_to_cpu(leaf->hdr.count)) memmove(lep + 1, lep, - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); /* * Record low and high logging indices for the leaf. */ lfloglow = index; - lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + lfloghigh = be16_to_cpu(leaf->hdr.count); + be16_add(&leaf->hdr.count, 1); } /* * There are stale entries. @@ -468,7 +468,7 @@ xfs_dir2_leaf_addname( */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; /* @@ -478,8 +478,8 @@ xfs_dir2_leaf_addname( * lowstale entry would be better. */ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -490,10 +490,10 @@ xfs_dir2_leaf_addname( * If the low one is better, use it. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { ASSERT(index - lowstale - 1 >= 0); - ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries up to cover the stale entry @@ -512,7 +512,7 @@ xfs_dir2_leaf_addname( */ else { ASSERT(highstale - index >= 0); - ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[highstale].address) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries down to cover the stale entry @@ -526,13 +526,14 @@ xfs_dir2_leaf_addname( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + be16_add(&leaf->hdr.stale, -1); } /* * Fill in the new leaf entry. */ - INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT))); + lep->hashval = cpu_to_be32(args->hashval); + lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, + be16_to_cpu(*tagp))); /* * Log the leaf fields and give up the buffers. */ @@ -563,30 +564,30 @@ xfs_dir2_leaf_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); /* * This value is not restrictive enough. * Should factor in the size of the bests table as well. * We can deduce a value for that from di_size.
*/ - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * Leaves and bests don't overlap. */ - ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <= + ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <= (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); /* * Check hash value order, count stale entries. */ - for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { - if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= - INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); - if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { + if (i + 1 < be16_to_cpu(leaf->hdr.count)) + ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= + be32_to_cpu(leaf->ents[i + 1].hashval)); + if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); + ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); } #endif /* DEBUG */ @@ -611,8 +612,8 @@ xfs_dir2_leaf_compact( /* * Compress out the stale entries in place. */ - for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { - if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) { + if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Only actually copy the entries that are different. @@ -627,8 +628,8 @@ xfs_dir2_leaf_compact( /* * Update and log the header, log the leaf entries. */ - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); + ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); + be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); leaf->hdr.stale = 0; xfs_dir2_leaf_log_header(args->trans, bp); if (loglow != -1) @@ -662,14 +663,14 @@ xfs_dir2_leaf_compact_x1( int to; /* destination copy index */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); + ASSERT(be16_to_cpu(leaf->hdr.stale) > 1); index = *indexp; /* * Find the first stale entry before our index, if any. */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; /* @@ -677,8 +678,8 @@ xfs_dir2_leaf_compact_x1( * Stop if the answer would be worse than lowstale. */ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale > highstale - index); highstale++) continue; @@ -686,7 +687,7 @@ xfs_dir2_leaf_compact_x1( * Pick the better of lowstale and highstale. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale <= highstale - index)) keepstale = lowstale; else @@ -695,14 +696,14 @@ xfs_dir2_leaf_compact_x1( * Copy the entries in place, removing all the stale entries * except keepstale. 
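The compaction loop above is a classic in-place filter: a read cursor (from) walks every entry, a write cursor (to) advances only over live ones, and entries are copied only once the cursors diverge. A user-space sketch of just that core, leaving out the kernel's logging bookkeeping (NULL_PTR stands in for XFS_DIR2_NULL_DATAPTR):

	#include <stdint.h>

	#define NULL_PTR 0xffffffffu	/* stand-in for XFS_DIR2_NULL_DATAPTR */

	struct ent { uint32_t hashval; uint32_t address; };

	/* Squeeze stale entries out of ents[] in place; return the live count. */
	static int compact(struct ent *ents, int count)
	{
		int from, to;

		for (from = to = 0; from < count; from++) {
			if (ents[from].address == NULL_PTR)
				continue;		/* skip stale slots */
			if (from > to)			/* copy only when displaced */
				ents[to] = ents[from];
			to++;
		}
		return to;			/* old count minus stale count */
	}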
*/ - for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { /* * Notice the new value of index. */ if (index == from) newindex = to; if (from != keepstale && - INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) { if (from == to) *lowlogp = to; continue; @@ -730,8 +731,8 @@ xfs_dir2_leaf_compact_x1( /* * Adjust the leaf header values. */ - INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); - INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); + be16_add(&leaf->hdr.count, -(from - to)); + leaf->hdr.stale = cpu_to_be16(1); /* * Remember the low/high stale value only in the "right" * direction. @@ -739,8 +740,8 @@ xfs_dir2_leaf_compact_x1( if (lowstale >= newindex) lowstale = -1; else - highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); - *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; + highstale = be16_to_cpu(leaf->hdr.count); + *highlogp = be16_to_cpu(leaf->hdr.count) - 1; *lowstalep = lowstale; *highstalep = highstale; } @@ -766,7 +767,7 @@ xfs_dir2_leaf_getdents( xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ int eof; /* reached end of directory */ - int error=0; /* error return value */ + int error = 0; /* error return value */ int i; /* temporary loop index */ int j; /* temporary loop index */ int length; /* temporary length value */ @@ -778,8 +779,8 @@ xfs_dir2_leaf_getdents( xfs_mount_t *mp; /* filesystem mount point */ xfs_dir2_off_t newoff; /* new curoff after new blk */ int nmap; /* mappings to ask xfs_bmapi */ - xfs_dir2_put_args_t p; /* formatting arg bundle */ - char *ptr=NULL; /* pointer to current data */ + xfs_dir2_put_args_t *p; /* formatting arg bundle */ + char *ptr = NULL; /* pointer to current data */ int ra_current; /* number of read-ahead blks */ int ra_index; /* *map index for read-ahead */ int ra_offset; /* map entry offset for ra */ @@ -797,9 +798,10 @@ xfs_dir2_leaf_getdents( /* * Setup formatting arguments. */ - p.dbp = dbp; - p.put = put; - p.uio = uio; + p = kmem_alloc(sizeof(*p), KM_SLEEP); + p->dbp = dbp; + p->put = put; + p->uio = uio; /* * Set up to bmap a number of blocks based on the caller's * buffer size, the directory block size, and the filesystem @@ -1046,11 +1048,10 @@ xfs_dir2_leaf_getdents( while ((char *)ptr - (char *)data < byteoff) { dup = (xfs_dir2_data_unused_t *)ptr; - if (INT_GET(dup->freetag, ARCH_CONVERT) + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - length = INT_GET(dup->length, - ARCH_CONVERT); + length = be16_to_cpu(dup->length); ptr += length; continue; } @@ -1079,9 +1080,8 @@ xfs_dir2_leaf_getdents( /* * No, it's unused, skip over it. 
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) - == XFS_DIR2_DATA_FREE_TAG) { - length = INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + length = be16_to_cpu(dup->length); ptr += length; curoff += length; continue; @@ -1092,24 +1092,24 @@ xfs_dir2_leaf_getdents( */ dep = (xfs_dir2_data_entry_t *)ptr; - p.namelen = dep->namelen; + p->namelen = dep->namelen; - length = XFS_DIR2_DATA_ENTSIZE(p.namelen); + length = XFS_DIR2_DATA_ENTSIZE(p->namelen); - p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); + p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); - p.ino = INT_GET(dep->inumber, ARCH_CONVERT); + p->ino = INT_GET(dep->inumber, ARCH_CONVERT); #if XFS_BIG_INUMS - p.ino += mp->m_inoadd; + p->ino += mp->m_inoadd; #endif - p.name = (char *)dep->name; + p->name = (char *)dep->name; - error = p.put(&p); + error = p->put(p); /* * Won't fit. Return to caller. */ - if (!p.done) { + if (!p->done) { eof = 0; break; } @@ -1129,6 +1129,7 @@ xfs_dir2_leaf_getdents( else uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); kmem_free(map, map_size * sizeof(*map)); + kmem_free(p, sizeof(*p)); if (bp) xfs_da_brelse(tp, bp); return error; @@ -1171,7 +1172,7 @@ xfs_dir2_leaf_init( /* * Initialize the header. */ - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); + leaf->hdr.info.magic = cpu_to_be16(magic); leaf->hdr.info.forw = 0; leaf->hdr.info.back = 0; leaf->hdr.count = 0; @@ -1201,13 +1202,13 @@ xfs_dir2_leaf_log_bests( int first, /* first entry to log */ int last) /* last entry to log */ { - xfs_dir2_data_off_t *firstb; /* pointer to first entry */ - xfs_dir2_data_off_t *lastb; /* pointer to last entry */ + __be16 *firstb; /* pointer to first entry */ + __be16 *lastb; /* pointer to last entry */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; @@ -1230,8 +1231,8 @@ xfs_dir2_leaf_log_ents( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || - INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || + be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); firstlep = &leaf->ents[first]; lastlep = &leaf->ents[last]; xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), @@ -1249,8 +1250,8 @@ xfs_dir2_leaf_log_header( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || - INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || + be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), (uint)(sizeof(leaf->hdr) - 1)); } @@ -1269,7 +1270,7 @@ xfs_dir2_leaf_log_tail( mp = tp->t_mountp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), (uint)(mp->m_dirblksize - 1)); @@ -1314,7 
+1315,7 @@ xfs_dir2_leaf_lookup( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); /* * Return the found inode number. */ @@ -1373,17 +1374,17 @@ xfs_dir2_leaf_lookup_int( * looking to match the name. */ for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; lep++, index++) { /* * Skip over stale leaf entries. */ - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get the new data block number. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); /* * If it's not the same as the old data block number, * need to pitch the old one and read the new one. @@ -1406,7 +1407,7 @@ xfs_dir2_leaf_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); /* * If it matches then return it. */ @@ -1435,7 +1436,7 @@ int /* error */ xfs_dir2_leaf_removename( xfs_da_args_t *args) /* operation arguments */ { - xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ + __be16 *bestsp; /* leaf block best freespace */ xfs_dir2_data_t *data; /* data block structure */ xfs_dir2_db_t db; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -1471,14 +1472,14 @@ xfs_dir2_leaf_removename( * Point to the leaf entry, use that to point to the data entry. */ lep = &leaf->ents[index]; - db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); dep = (xfs_dir2_data_entry_t *) - ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); needscan = needlog = 0; - oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + oldbest = be16_to_cpu(data->hdr.bestfree[0].length); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); + ASSERT(be16_to_cpu(bestsp[db]) == oldbest); /* * Mark the former data entry unused. */ @@ -1488,9 +1489,9 @@ xfs_dir2_leaf_removename( /* * We just mark the leaf entry stale by putting a null in it. */ - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + be16_add(&leaf->hdr.stale, 1); xfs_dir2_leaf_log_header(tp, lbp); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, lbp, index, index); /* * Scan the freespace in the data block again if necessary, @@ -1504,15 +1505,15 @@ xfs_dir2_leaf_removename( * If the longest freespace in the data block has changed, * put the new value in the bests table and log that. */ - if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { - INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) { + bestsp[db] = data->hdr.bestfree[0].length; xfs_dir2_leaf_log_bests(tp, lbp, db, db); } xfs_dir2_data_check(dp, dbp); /* * If the data block is now empty then get rid of the data block. 
*/ - if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + if (be16_to_cpu(data->hdr.bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(data->hdr)) { ASSERT(db != mp->m_dirdatablk); if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { @@ -1535,12 +1536,12 @@ xfs_dir2_leaf_removename( * If this is the last data block then compact the * bests table by getting rid of entries. */ - if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { + if (db == be32_to_cpu(ltp->bestcount) - 1) { /* * Look for the last active entry (i). */ for (i = db - 1; i > 0; i--) { - if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) + if (be16_to_cpu(bestsp[i]) != NULLDATAOFF) break; } /* @@ -1548,12 +1549,12 @@ xfs_dir2_leaf_removename( * end are removed. */ memmove(&bestsp[db - i], bestsp, - (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); - INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); + (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); + be32_add(&ltp->bestcount, -(db - i)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); } else - INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); + bestsp[db] = cpu_to_be16(NULLDATAOFF); } /* * If the data block was not the first one, drop it. @@ -1604,7 +1605,7 @@ xfs_dir2_leaf_replace( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Put the new inode number in, log it. @@ -1645,11 +1646,11 @@ xfs_dir2_leaf_search_hash( * Note, the table cannot be empty, so we have to go through the loop. * Binary search the leaf entries looking for our hash value. */ - for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1, + for (lep = leaf->ents, low = 0, high = be16_to_cpu(leaf->hdr.count) - 1, hashwant = args->hashval; low <= high; ) { mid = (low + high) >> 1; - if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant) + if ((hash = be32_to_cpu(lep[mid].hashval)) == hashwant) break; if (hash < hashwant) low = mid + 1; @@ -1660,7 +1661,7 @@ xfs_dir2_leaf_search_hash( * Found one, back up through all the equal hash values. */ if (hash == hashwant) { - while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) { + while (mid > 0 && be32_to_cpu(lep[mid - 1].hashval) == hashwant) { mid--; } } @@ -1682,7 +1683,7 @@ xfs_dir2_leaf_trim_data( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dir2_db_t db) /* data block number */ { - xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + __be16 *bestsp; /* leaf bests table */ #ifdef DEBUG xfs_dir2_data_t *data; /* data block structure */ #endif @@ -1706,7 +1707,7 @@ xfs_dir2_leaf_trim_data( } #ifdef DEBUG data = dbp->data; - ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); #endif /* this seems to be an error * data is only valid if DEBUG is defined?
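xfs_dir2_leaf_search_hash() above is a binary search with one twist: several names can share a hash value, so on a hit it backs up to the first entry of the equal-hash run. A stand-alone sketch of the pattern (hypothetical; the kernel version additionally assumes a non-empty table):

	#include <stdint.h>

	/*
	 * Return the index of the first entry whose hash equals want,
	 * or the insertion point if none does.  hashes[] must be sorted.
	 */
	static int search_hash(const uint32_t *hashes, int count, uint32_t want)
	{
		int low = 0, high = count - 1, mid = 0;
		uint32_t hash = ~want;		/* anything != want */

		while (low <= high) {
			mid = (low + high) >> 1;
			if ((hash = hashes[mid]) == want)
				break;
			if (hash < want)
				low = mid + 1;
			else
				high = mid - 1;
		}
		if (hash != want)
			return low;		/* not found: insertion point */
		while (mid > 0 && hashes[mid - 1] == want)
			mid--;			/* back up over duplicates */
		return mid;
	}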
@@ -1715,9 +1716,9 @@ xfs_dir2_leaf_trim_data( leaf = lbp->data; ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(data->hdr)); - ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); /* * Get rid of the data block. */ @@ -1730,10 +1731,10 @@ xfs_dir2_leaf_trim_data( * Eliminate the last bests entry from the table. */ bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - INT_MOD(ltp->bestcount, ARCH_CONVERT, -1); - memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp)); + be32_add(&ltp->bestcount, -1); + memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); return 0; } @@ -1805,7 +1806,7 @@ xfs_dir2_node_to_leaf( return 0; lbp = state->path.blk[0].bp; leaf = lbp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * Read the freespace block. */ @@ -1814,15 +1815,15 @@ xfs_dir2_node_to_leaf( return error; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); ASSERT(!free->hdr.firstdb); /* * Now see if the leafn and free data will fit in a leaf1. * If not, release the buffer and give up. */ if ((uint)sizeof(leaf->hdr) + - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + - INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + + (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) + + be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) + (uint)sizeof(leaf->tail) > mp->m_dirblksize) { xfs_da_brelse(tp, fbp); @@ -1832,22 +1833,22 @@ xfs_dir2_node_to_leaf( * If the leaf has any stale entries in it, compress them out. * The compact routine will log the header. */ - if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) + if (be16_to_cpu(leaf->hdr.stale)) xfs_dir2_leaf_compact(args, lbp); else xfs_dir2_leaf_log_header(tp, lbp); - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); + leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC); /* * Set up the leaf tail from the freespace block. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); + ltp->bestcount = free->hdr.nvalid; /* * Set up the leaf bests table.
*/ memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, - INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); xfs_dir2_leaf_log_tail(tp, lbp); xfs_dir2_leaf_check(dp, lbp); /* diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h index 1393993d61e9..f57ca1162412 100644 --- a/fs/xfs/xfs_dir2_leaf.h +++ b/fs/xfs/xfs_dir2_leaf.h @@ -46,23 +46,23 @@ typedef __uint32_t xfs_dir2_dataptr_t; */ typedef struct xfs_dir2_leaf_hdr { xfs_da_blkinfo_t info; /* header for da routines */ - __uint16_t count; /* count of entries */ - __uint16_t stale; /* count of stale entries */ + __be16 count; /* count of entries */ + __be16 stale; /* count of stale entries */ } xfs_dir2_leaf_hdr_t; /* * Leaf block entry. */ typedef struct xfs_dir2_leaf_entry { - xfs_dahash_t hashval; /* hash value of name */ - xfs_dir2_dataptr_t address; /* address of data entry */ + __be32 hashval; /* hash value of name */ + __be32 address; /* address of data entry */ } xfs_dir2_leaf_entry_t; /* * Leaf block tail. */ typedef struct xfs_dir2_leaf_tail { - __uint32_t bestcount; + __be32 bestcount; } xfs_dir2_leaf_tail_t; /* @@ -105,11 +105,10 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) * Get address of the bests array in the single-leaf block. */ #define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) { - return (xfs_dir2_data_off_t *) - (ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT); + return (__be16 *)ltp - be32_to_cpu(ltp->bestcount); } /* diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 641f8633d254..af556f16a0c7 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -76,7 +76,7 @@ xfs_dir2_free_log_bests( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->bests[first] - (char *)free), (uint)((char *)&free->bests[last] - (char *)free + @@ -94,7 +94,7 @@ xfs_dir2_free_log_header( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); } @@ -114,14 +114,14 @@ xfs_dir2_leaf_to_node( xfs_dabuf_t *fbp; /* freespace buffer */ xfs_dir2_db_t fdb; /* freespace block number */ xfs_dir2_free_t *free; /* freespace structure */ - xfs_dir2_data_off_t *from; /* pointer to freespace entry */ + __be16 *from; /* pointer to freespace entry */ int i; /* leaf freespace index */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ xfs_mount_t *mp; /* filesystem mount point */ int n; /* count of live freespc ents */ xfs_dir2_data_off_t off; /* freespace entry value */ - xfs_dir2_data_off_t *to; /* pointer to freespace entry */ + __be16 *to; /* pointer to freespace entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args_b("leaf_to_node", args, lbp); @@ -149,28 +149,28 @@ xfs_dir2_leaf_to_node( /* * Initialize the freespace block header. 
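The new xfs_dir2_leaf_bests_p() above is worth a picture: in a leaf1 block the tail occupies the last bytes of the directory block and the bests array grows downward from it, so both are reachable by pointer arithmetic alone. A sketch paraphrasing the two inline helpers from this header (the tail_p body is not shown in the hunks above, so treat it as a reconstruction):

	/*
	 * Leaf1 block layout, low addresses on the left:
	 *
	 *   | hdr | ents[0 .. count-1] | ...free... | bests[0 .. n-1] | tail |
	 *                                                     end of block ^
	 */
	static inline xfs_dir2_leaf_tail_t *
	leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
	{
		return (xfs_dir2_leaf_tail_t *)((char *)lp + mp->m_dirblksize -
						(uint)sizeof(xfs_dir2_leaf_tail_t));
	}

	static inline __be16 *
	leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
	{
		/* bestcount __be16 slots immediately precede the tail */
		return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
	}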
*/ - INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); free->hdr.firstdb = 0; - ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize); - INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT); + ASSERT(be32_to_cpu(ltp->bestcount) <= (uint)dp->i_d.di_size / mp->m_dirblksize); + free->hdr.nvalid = ltp->bestcount; /* * Copy freespace entries from the leaf block to the new block. * Count active entries. */ for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; - i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) { - if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF) + i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { + if ((off = be16_to_cpu(*from)) != NULLDATAOFF) n++; - INT_SET(*to, ARCH_CONVERT, off); + *to = cpu_to_be16(off); } - INT_SET(free->hdr.nused, ARCH_CONVERT, n); - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC); + free->hdr.nused = cpu_to_be32(n); + leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC); /* * Log everything. */ xfs_dir2_leaf_log_header(tp, lbp); xfs_dir2_free_log_header(tp, fbp); - xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1); + xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1); xfs_da_buf_done(fbp); xfs_dir2_leafn_check(dp, lbp); return 0; @@ -217,15 +217,15 @@ xfs_dir2_leafn_add( * a compact. */ - if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { + if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { if (!leaf->hdr.stale) return XFS_ERROR(ENOSPC); - compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; + compact = be16_to_cpu(leaf->hdr.stale) > 1; } else compact = 0; - ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); - ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || - INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); + ASSERT(index == 0 || be32_to_cpu(leaf->ents[index - 1].hashval) <= args->hashval); + ASSERT(index == be16_to_cpu(leaf->hdr.count) || + be32_to_cpu(leaf->ents[index].hashval) >= args->hashval); if (args->justcheck) return 0; @@ -242,7 +242,7 @@ xfs_dir2_leafn_add( * Set impossible logging indices for this case. */ else if (leaf->hdr.stale) { - lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloglow = be16_to_cpu(leaf->hdr.count); lfloghigh = -1; } /* @@ -250,12 +250,12 @@ xfs_dir2_leafn_add( */ if (!leaf->hdr.stale) { lep = &leaf->ents[index]; - if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + if (index < be16_to_cpu(leaf->hdr.count)) memmove(lep + 1, lep, - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); lfloglow = index; - lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + lfloghigh = be16_to_cpu(leaf->hdr.count); + be16_add(&leaf->hdr.count, 1); } /* * There are stale entries. We'll use one for the new entry. @@ -271,7 +271,7 @@ xfs_dir2_leafn_add( */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; @@ -281,8 +281,8 @@ xfs_dir2_leafn_add( * lowstale already found. 
*/ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -294,9 +294,9 @@ xfs_dir2_leafn_add( * Shift entries up toward the stale slot. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { - ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == XFS_DIR2_NULL_DATAPTR); ASSERT(index - lowstale - 1 >= 0); if (index - lowstale - 1 > 0) @@ -312,7 +312,7 @@ xfs_dir2_leafn_add( * Shift entries down toward the stale slot. */ else { - ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[highstale].address) == XFS_DIR2_NULL_DATAPTR); ASSERT(highstale - index >= 0); if (highstale - index > 0) @@ -323,13 +323,14 @@ xfs_dir2_leafn_add( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + be16_add(&leaf->hdr.stale, -1); } /* * Insert the new entry, log everything. */ - INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); + lep->hashval = cpu_to_be32(args->hashval); + lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, + args->blkno, args->index)); xfs_dir2_leaf_log_header(tp, bp); xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); xfs_dir2_leafn_check(dp, bp); @@ -352,17 +353,17 @@ xfs_dir2_leafn_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); - for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { - if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { - ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= - INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { + if (i + 1 < be16_to_cpu(leaf->hdr.count)) { + ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= + be32_to_cpu(leaf->ents[i + 1].hashval)); } - if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); + ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); } #endif /* DEBUG */ @@ -378,12 +379,12 @@ xfs_dir2_leafn_lasthash( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); if (count) - *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) return 0; - return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + return be32_to_cpu(leaf->ents[be16_to_cpu(leaf->hdr.count) - 1].hashval); } /* @@ -419,9 +420,9 @@ xfs_dir2_leafn_lookup_int( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == 
XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); #ifdef __KERNEL__ - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); + ASSERT(be16_to_cpu(leaf->hdr.count) > 0); #endif xfs_dir2_leafn_check(dp, bp); /* @@ -443,7 +444,7 @@ xfs_dir2_leafn_lookup_int( curdb = -1; length = XFS_DIR2_DATA_ENTSIZE(args->namelen); if ((free = (curbp ? curbp->data : NULL))) - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); } /* * For others, it's a data block buffer, get the block number. @@ -456,17 +457,17 @@ xfs_dir2_leafn_lookup_int( * Loop over leaf entries with the right hash value. */ for (lep = &leaf->ents[index]; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; lep++, index++) { /* * Skip stale leaf entries. */ - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Pull the data block number from the entry. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); /* * For addname, we're looking for a place to put the new entry. * We want to use a data block with an entry of equal @@ -506,15 +507,15 @@ xfs_dir2_leafn_lookup_int( } curfdb = newfdb; free = curbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); - ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % + ASSERT((be32_to_cpu(free->hdr.firstdb) % XFS_DIR2_MAX_FREE_BESTS(mp)) == 0); - ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); + ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); ASSERT(curdb < - INT_GET(free->hdr.firstdb, ARCH_CONVERT) + - INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + be32_to_cpu(free->hdr.firstdb) + + be32_to_cpu(free->hdr.nvalid)); } /* * Get the index for our entry. @@ -523,12 +524,12 @@ xfs_dir2_leafn_lookup_int( /* * If it has room, return it. */ - if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { + if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } - if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { + if (be16_to_cpu(free->bests[fi]) >= length) { *indexp = index; state->extravalid = 1; state->extrablk.bp = curbp; @@ -572,7 +573,7 @@ xfs_dir2_leafn_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)curbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); /* * Compare the entry, return it if it matches. */ @@ -619,7 +620,7 @@ xfs_dir2_leafn_lookup_int( * Return the final index, that will be the insertion point. */ *indexp = index; - ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent); return XFS_ERROR(ENOENT); } @@ -657,12 +658,12 @@ xfs_dir2_leafn_moveents( * destination leaf entries, open up a hole in the destination * to hold the new entries. 
*/ - if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { + if (start_d < be16_to_cpu(leaf_d->hdr.count)) { memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], - (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * + (be16_to_cpu(leaf_d->hdr.count) - start_d) * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, - count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); + count + be16_to_cpu(leaf_d->hdr.count) - 1); } /* * If the source has stale leaves, count the ones in the copy range @@ -672,7 +673,7 @@ xfs_dir2_leafn_moveents( int i; /* temp leaf index */ for (i = start_s, stale = 0; i < start_s + count; i++) { - if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } } else @@ -687,7 +688,7 @@ xfs_dir2_leafn_moveents( * If there are source entries after the ones we copied, * delete the ones we copied by sliding the next ones down. */ - if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { + if (start_s + count < be16_to_cpu(leaf_s->hdr.count)) { memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], count * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); @@ -695,10 +696,10 @@ xfs_dir2_leafn_moveents( /* * Update the headers and log them. */ - INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); - INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); - INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); - INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); + be16_add(&leaf_s->hdr.count, -(count)); + be16_add(&leaf_s->hdr.stale, -(stale)); + be16_add(&leaf_d->hdr.count, count); + be16_add(&leaf_d->hdr.stale, stale); xfs_dir2_leaf_log_header(tp, bp_s); xfs_dir2_leaf_log_header(tp, bp_d); xfs_dir2_leafn_check(args->dp, bp_s); @@ -719,13 +720,13 @@ xfs_dir2_leafn_order( leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && - INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && - (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || - INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < - INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + if (be16_to_cpu(leaf1->hdr.count) > 0 && + be16_to_cpu(leaf2->hdr.count) > 0 && + (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) || + be32_to_cpu(leaf2->ents[be16_to_cpu(leaf2->hdr.count) - 1].hashval) < + be32_to_cpu(leaf1->ents[be16_to_cpu(leaf1->hdr.count) - 1].hashval))) return 1; return 0; } @@ -768,9 +769,9 @@ xfs_dir2_leafn_rebalance( } leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); + oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count); #ifdef DEBUG - oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); + oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale); #endif mid = oldsum >> 1; /* @@ -780,10 +781,10 @@ xfs_dir2_leafn_rebalance( if (oldsum & 1) { xfs_dahash_t midhash; /* middle entry 
hash value */ - if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) - midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); + if (mid >= be16_to_cpu(leaf1->hdr.count)) + midhash = be32_to_cpu(leaf2->ents[mid - be16_to_cpu(leaf1->hdr.count)].hashval); else - midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); + midhash = be32_to_cpu(leaf1->ents[mid].hashval); isleft = args->hashval <= midhash; } /* @@ -797,30 +798,30 @@ xfs_dir2_leafn_rebalance( * Calculate moved entry count. Positive means left-to-right, * negative means right-to-left. Then move the entries. */ - count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); + count = be16_to_cpu(leaf1->hdr.count) - mid + (isleft == 0); if (count > 0) xfs_dir2_leafn_moveents(args, blk1->bp, - INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); + be16_to_cpu(leaf1->hdr.count) - count, blk2->bp, 0, count); else if (count < 0) xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, - INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); - ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); - ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); + be16_to_cpu(leaf1->hdr.count), count); + ASSERT(be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count) == oldsum); + ASSERT(be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale) == oldstale); /* * Mark whether we're inserting into the old or new leaf. */ - if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + if (be16_to_cpu(leaf1->hdr.count) < be16_to_cpu(leaf2->hdr.count)) state->inleaf = swap; - else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + else if (be16_to_cpu(leaf1->hdr.count) > be16_to_cpu(leaf2->hdr.count)) state->inleaf = !swap; else state->inleaf = - swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + swap ^ (blk1->index <= be16_to_cpu(leaf1->hdr.count)); /* * Adjust the expected index for insertion. */ if (!state->inleaf) - blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); /* * Finally sanity check just to make sure we are not returning a negative index @@ -867,7 +868,7 @@ xfs_dir2_leafn_remove( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * Point to the entry we're removing. */ @@ -875,17 +876,17 @@ xfs_dir2_leafn_remove( /* * Extract the data block and offset from the entry. */ - db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); ASSERT(dblk->blkno == db); - off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); + off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)); ASSERT(dblk->index == off); /* * Kill the leaf entry by marking it stale. * Log the leaf block changes. */ - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + be16_add(&leaf->hdr.stale, 1); xfs_dir2_leaf_log_header(tp, bp); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, bp, index, index); /* * Make the data entry free. 
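The rebalance arithmetic above compresses into one formula: with the two live counts summing to oldsum, the left leaf should end up holding mid = oldsum >> 1 entries, plus one slack slot if the new entry is headed for the right leaf. A sketch of just that computation (hypothetical helper; c1 is the left leaf's current count):

	/*
	 * How many entries to shift between two sibling leaves.
	 * Positive: move that many left-to-right; negative: right-to-left.
	 */
	static int rebalance_count(int c1, int c2, int new_goes_left)
	{
		int mid = (c1 + c2) >> 1;	/* target size for the left leaf */

		/* reserve the extra slot on the side taking the new entry */
		return c1 - mid + (new_goes_left == 0);
	}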
Keep track of the longest freespace @@ -894,7 +895,7 @@ xfs_dir2_leafn_remove( dbp = dblk->bp; data = dbp->data; dep = (xfs_dir2_data_entry_t *)((char *)data + off); - longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + longest = be16_to_cpu(data->hdr.bestfree[0].length); needlog = needscan = 0; xfs_dir2_data_make_free(tp, dbp, off, XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); @@ -911,7 +912,7 @@ xfs_dir2_leafn_remove( * If the longest data block freespace changes, need to update * the corresponding freeblock entry. */ - if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) { int error; /* error return value */ xfs_dabuf_t *fbp; /* freeblock buffer */ xfs_dir2_db_t fdb; /* freeblock block number */ @@ -929,15 +930,15 @@ xfs_dir2_leafn_remove( return error; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); - ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) == + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.firstdb) == XFS_DIR2_MAX_FREE_BESTS(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); /* * Calculate which entry we need to fix. */ findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); - longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + longest = be16_to_cpu(data->hdr.bestfree[0].length); /* * If the data block is now empty we can get rid of it * (usually). @@ -969,7 +970,7 @@ xfs_dir2_leafn_remove( /* * One less used entry in the free table. */ - INT_MOD(free->hdr.nused, ARCH_CONVERT, -1); + be32_add(&free->hdr.nused, -1); xfs_dir2_free_log_header(tp, fbp); /* * If this was the last entry in the table, we can @@ -977,21 +978,21 @@ * entries at the end referring to non-existent * data blocks, get those too. */ - if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) { + if (findex == be32_to_cpu(free->hdr.nvalid) - 1) { int i; /* free entry index */ for (i = findex - 1; - i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF; + i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF; i--) continue; - INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1); + free->hdr.nvalid = cpu_to_be32(i + 1); logfree = 0; } /* * Not the last entry, just punch it out. */ else { - INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + free->bests[findex] = cpu_to_be16(NULLDATAOFF); logfree = 1; } /* @@ -1017,7 +1018,7 @@ xfs_dir2_leafn_remove( * the new value.
*/ else { - INT_SET(free->bests[findex], ARCH_CONVERT, longest); + free->bests[findex] = cpu_to_be16(longest); logfree = 1; } /* @@ -1039,7 +1040,7 @@ xfs_dir2_leafn_remove( *rval = ((uint)sizeof(leaf->hdr) + (uint)sizeof(leaf->ents[0]) * - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) < + (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale))) < mp->m_dir_magicpct; return 0; } @@ -1138,9 +1139,9 @@ xfs_dir2_leafn_toosmall( */ blk = &state->path.blk[state->path.active - 1]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); if (bytes > (state->blocksize >> 1)) { /* @@ -1160,7 +1161,7 @@ xfs_dir2_leafn_toosmall( * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &rval); @@ -1176,9 +1177,9 @@ xfs_dir2_leafn_toosmall( * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); + forward = be32_to_cpu(info->forw) < be32_to_cpu(info->back); for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { - blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); + blkno = forward ? be32_to_cpu(info->forw) : be32_to_cpu(info->back); if (blkno == 0) continue; /* @@ -1194,11 +1195,11 @@ xfs_dir2_leafn_toosmall( * Count bytes in the two blocks combined. */ leaf = (xfs_dir2_leaf_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = state->blocksize - (state->blocksize >> 2); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes -= count * (uint)sizeof(leaf->ents[0]); /* * Fits with at least 25% to spare. @@ -1256,27 +1257,27 @@ xfs_dir2_leafn_unbalance( ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * If there are any stale leaf entries, take this opportunity * to purge them. */ - if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) + if (drop_leaf->hdr.stale) xfs_dir2_leaf_compact(args, drop_blk->bp); - if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) + if (save_leaf->hdr.stale) xfs_dir2_leaf_compact(args, save_blk->bp); /* * Move the entries from drop to the appropriate end of save. 
*/ - drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu(drop_leaf->ents[be16_to_cpu(drop_leaf->hdr.count) - 1].hashval); if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, - INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + be16_to_cpu(drop_leaf->hdr.count)); else xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, - INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); - save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + be16_to_cpu(save_leaf->hdr.count), be16_to_cpu(drop_leaf->hdr.count)); + save_blk->hashval = be32_to_cpu(save_leaf->ents[be16_to_cpu(save_leaf->hdr.count) - 1].hashval); xfs_dir2_leafn_check(args->dp, save_blk->bp); } @@ -1378,7 +1379,7 @@ xfs_dir2_node_addname_int( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log data header */ int needscan; /* need to rescan data frees */ - xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ + __be16 *tagp; /* data entry tag pointer */ xfs_trans_t *tp; /* transaction pointer */ dp = args->dp; @@ -1397,7 +1398,7 @@ xfs_dir2_node_addname_int( */ ifbno = fblk->blkno; free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); findex = fblk->index; /* * This means the free entry showed that the data block had @@ -1405,10 +1406,10 @@ xfs_dir2_node_addname_int( * Use that data block. */ if (findex >= 0) { - ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); - ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); - ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); - dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + ASSERT(findex < be32_to_cpu(free->hdr.nvalid)); + ASSERT(be16_to_cpu(free->bests[findex]) != NULLDATAOFF); + ASSERT(be16_to_cpu(free->bests[findex]) >= length); + dbno = be32_to_cpu(free->hdr.firstdb) + findex; } /* * The data block looked at didn't have enough room. @@ -1481,20 +1482,20 @@ xfs_dir2_node_addname_int( continue; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); findex = 0; } /* * Look at the current free entry. Is it good enough? */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && - INT_GET(free->bests[findex], ARCH_CONVERT) >= length) - dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + if (be16_to_cpu(free->bests[findex]) != NULLDATAOFF && + be16_to_cpu(free->bests[findex]) >= length) + dbno = be32_to_cpu(free->hdr.firstdb) + findex; else { /* * Are we done with the freeblock? */ - if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + if (++findex == be32_to_cpu(free->hdr.nvalid)) { /* * Drop the block. */ @@ -1608,15 +1609,15 @@ xfs_dir2_node_addname_int( * its first slot as our empty slot. 
*/ free = fbp->data; - INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); - INT_SET(free->hdr.firstdb, ARCH_CONVERT, + free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); + free->hdr.firstdb = cpu_to_be32( (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * XFS_DIR2_MAX_FREE_BESTS(mp)); free->hdr.nvalid = 0; free->hdr.nused = 0; } else { free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); } /* @@ -1627,20 +1628,20 @@ xfs_dir2_node_addname_int( * If it's after the end of the current entries in the * freespace block, extend that table. */ - if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + if (findex >= be32_to_cpu(free->hdr.nvalid)) { ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); - INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); + free->hdr.nvalid = cpu_to_be32(findex + 1); /* * Tag new entry so nused will go up. */ - INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + free->bests[findex] = cpu_to_be16(NULLDATAOFF); } /* * If this entry was for an empty data block * (this should always be true) then update the header. */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { - INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); + if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { + be32_add(&free->hdr.nused, 1); xfs_dir2_free_log_header(tp, fbp); } /* @@ -1649,7 +1650,7 @@ xfs_dir2_node_addname_int( * change again. */ data = dbp->data; - INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + free->bests[findex] = data->hdr.bestfree[0].length; logfree = 1; } /* @@ -1677,12 +1678,12 @@ xfs_dir2_node_addname_int( data = dbp->data; logfree = 0; } - ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); + ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length); /* * Point to the existing unused space. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); needscan = needlog = 0; /* * Mark the first part of the unused space, inuse for us. @@ -1698,7 +1699,7 @@ xfs_dir2_node_addname_int( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + *tagp = cpu_to_be16((char *)dep - (char *)data); xfs_dir2_data_log_entry(tp, dbp, dep); /* * Rescan the block for bestfree if needed. @@ -1713,8 +1714,8 @@ xfs_dir2_node_addname_int( /* * If the freespace entry is now wrong, update it. */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { - INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) { + free->bests[findex] = data->hdr.bestfree[0].length; logfree = 1; } /* @@ -1731,7 +1732,7 @@ xfs_dir2_node_addname_int( * Return the data block and offset in args, then drop the data block. */ args->blkno = (xfs_dablk_t)dbno; - args->index = INT_GET(*tagp, ARCH_CONVERT); + args->index = be16_to_cpu(*tagp); xfs_da_buf_done(dbp); return 0; } @@ -1900,10 +1901,10 @@ xfs_dir2_node_replace( * Point to the data entry. 
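The firstdb initialisation above pins down the freespace-block geometry: each free block covers XFS_DIR2_MAX_FREE_BESTS(mp) consecutive data blocks, starting at XFS_DIR2_FREE_FIRSTDB(mp). Locating the slot for a given data block is then plain division and remainder, which is all XFS_DIR2_DB_TO_FDINDEX amounts to. A sketch (hypothetical helper; bests_per_fblock stands in for XFS_DIR2_MAX_FREE_BESTS(mp)):

	/* Map a data block number to its freespace block ordinal and slot. */
	static void db_to_free_slot(int db, int bests_per_fblock,
				    int *fblock, int *findex)
	{
		*fblock = db / bests_per_fblock;  /* add FREE_FIRSTDB for the
						     actual free block number */
		*findex = db % bests_per_fblock;  /* index into free->bests[] */
	}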
*/ data = state->extrablk.bp->data; - ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); dep = (xfs_dir2_data_entry_t *) ((char *)data + - XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address))); ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Fill in the new inode number and log the entry. @@ -1966,11 +1967,11 @@ xfs_dir2_node_trim_free( return 0; } free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); /* * If there are used entries, there's nothing to do. */ - if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { + if (be32_to_cpu(free->hdr.nused) > 0) { xfs_da_brelse(tp, bp); *rvalp = 0; return 0; diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h index 0ab8fbd59512..c7c870ee7857 100644 --- a/fs/xfs/xfs_dir2_node.h +++ b/fs/xfs/xfs_dir2_node.h @@ -41,15 +41,15 @@ struct xfs_trans; #define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ typedef struct xfs_dir2_free_hdr { - __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ - __int32_t firstdb; /* db of first entry */ - __int32_t nvalid; /* count of valid entries */ - __int32_t nused; /* count of used entries */ + __be32 magic; /* XFS_DIR2_FREE_MAGIC */ + __be32 firstdb; /* db of first entry */ + __be32 nvalid; /* count of valid entries */ + __be32 nused; /* count of used entries */ } xfs_dir2_free_hdr_t; typedef struct xfs_dir2_free { xfs_dir2_free_hdr_t hdr; /* block header */ - xfs_dir2_data_off_t bests[1]; /* best free counts */ + __be16 bests[1]; /* best free counts */ /* unused entries are -1 */ } xfs_dir2_free_t; diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index ec8e7476c8b7..d98a41d1fe63 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -98,8 +98,8 @@ xfs_dir2_block_sfsize( /* * Iterate over the block's data entries by using the leaf pointers. */ - for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + for (i = 0; i < be32_to_cpu(btp->count); i++) { + if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Calculate the pointer to the entry at hand. @@ -220,8 +220,8 @@ xfs_dir2_block_to_sf( * If it's unused, just skip over it. 
*/ dup = (xfs_dir2_data_unused_t *)ptr; - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - ptr += INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + ptr += be16_to_cpu(dup->length); continue; } dep = (xfs_dir2_data_entry_t *)ptr; diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c index e83074016abb..ee88751c3be6 100644 --- a/fs/xfs/xfs_dir_leaf.c +++ b/fs/xfs/xfs_dir_leaf.c @@ -176,7 +176,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) ASSERT(dp->i_df.if_u1.if_data != NULL); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && args->name[0] == sfe->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) @@ -193,7 +193,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); sfe->namelen = args->namelen; memcpy(sfe->name, args->name, sfe->namelen); - INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); + sf->hdr.count++; dp->i_d.di_size += size; xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); @@ -227,7 +227,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) base = sizeof(xfs_dir_sf_hdr_t); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && @@ -245,7 +245,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) memmove(&((char *)sf)[base], &((char *)sf)[base+size], dp->i_d.di_size - (base+size)); } - INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + sf->hdr.count--; xfs_idata_realloc(dp, -size, XFS_DATA_FORK); dp->i_d.di_size -= size; @@ -288,7 +288,7 @@ xfs_dir_shortform_lookup(xfs_da_args_t *args) return(XFS_ERROR(EEXIST)); } sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -375,7 +375,7 @@ xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs) goto out; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0; i < sf->hdr.count; i++) { args.name = (char *)(sfe->name); args.namelen = sfe->namelen; args.hashval = xfs_da_hashname((char *)(sfe->name), @@ -428,7 +428,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); - nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; + nsbuf = sf->hdr.count + 2; sbsize = (nsbuf + 1) * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); @@ -460,8 +460,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, /* * Scan the directory data for the rest of the entries. 
*/ - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || @@ -600,7 +599,7 @@ xfs_dir_shortform_replace(xfs_da_args_t *args) } ASSERT(args->namelen != 1 || args->name[0] != '.'); sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -644,7 +643,7 @@ xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_dir_leafblock_t *)tmpbuffer; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -742,11 +741,13 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args) } node = bp1->data; leaf = bp2->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + node->btree[0].hashval = cpu_to_be32( + INT_GET(leaf->entries[ + INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); xfs_da_buf_done(bp2); - INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 1); + node->btree[0].before = cpu_to_be32(blkno); + node->hdr.count = cpu_to_be16(1); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); xfs_da_buf_done(bp1); @@ -781,7 +782,7 @@ xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); + hdr->info.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC); INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); @@ -860,7 +861,7 @@ xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index) int tablesize, entsize, sum, i, tmp, error; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); hdr = &leaf->hdr; entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); @@ -940,7 +941,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index, int tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); @@ -1097,8 +1098,8 @@ xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == 
XFS_DIR_LEAF_MAGIC); /* * Check ordering of blocks, reverse if it makes things simpler. @@ -1325,7 +1326,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DIR_LEAF_MAGIC); leaf = (xfs_dir_leafblock_t *)info; count = INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + @@ -1348,7 +1349,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1369,12 +1370,12 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */ + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); /* start with smaller blk num */ for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1389,7 +1390,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) bytes = state->blocksize - (state->blocksize>>2); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); count += INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); @@ -1447,7 +1448,7 @@ xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index) xfs_mount_t *mp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; mp = trans->t_mountp; ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1599,8 +1600,8 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; @@ -1695,7 +1696,7 @@ xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* @@ -1782,8 +1783,8 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
*/ - ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1883,8 +1884,8 @@ xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); + ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC) && + (be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC)); if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || @@ -1904,7 +1905,7 @@ xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_dir_leafblock_t *leaf; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); if (count) *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); if (!leaf->hdr.count) @@ -1940,7 +1941,7 @@ xfs_dir_leaf_getdents_int( mp = dp->i_mount; leaf = bp->data; - if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + if (be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { *eobp = 1; return XFS_ERROR(ENOENT); /* XXX wrong code */ } @@ -1992,7 +1993,7 @@ xfs_dir_leaf_getdents_int( if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { xfs_dir_trace_g_du("leaf: hash not found", dp, uio); - if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) + if (!leaf->hdr.info.forw) uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); /* @@ -2047,8 +2048,7 @@ xfs_dir_leaf_getdents_int( xfs_dir_trace_g_duc("leaf: middle cookie ", dp, uio, p.cook.o); - } else if ((thishash = INT_GET(leaf->hdr.info.forw, - ARCH_CONVERT))) { + } else if ((thishash = be32_to_cpu(leaf->hdr.info.forw))) { xfs_dabuf_t *bp2; xfs_dir_leafblock_t *leaf2; @@ -2064,9 +2064,9 @@ xfs_dir_leaf_getdents_int( leaf2 = bp2->data; if (unlikely( - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + (be16_to_cpu(leaf2->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) - || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT) + || (be32_to_cpu(leaf2->hdr.info.back) != bno))) { /* GROT */ XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)", XFS_ERRLEVEL_LOW, mp, diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h index fe44c6f4d560..5b20b4d3f57d 100644 --- a/fs/xfs/xfs_dir_sf.h +++ b/fs/xfs/xfs_dir_sf.h @@ -35,19 +35,21 @@ typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t; * and the elements much be memcpy'd out into a work area to get correct * alignment for the inode number fields. 
*/ +typedef struct xfs_dir_sf_hdr { /* constant-structure header block */ + xfs_dir_ino_t parent; /* parent dir inode number */ + __uint8_t count; /* count of active entries */ +} xfs_dir_sf_hdr_t; + +typedef struct xfs_dir_sf_entry { + xfs_dir_ino_t inumber; /* referenced inode number */ + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t name[1]; /* name */ +} xfs_dir_sf_entry_t; + typedef struct xfs_dir_shortform { - struct xfs_dir_sf_hdr { /* constant-structure header block */ - xfs_dir_ino_t parent; /* parent dir inode number */ - __uint8_t count; /* count of active entries */ - } hdr; - struct xfs_dir_sf_entry { - xfs_dir_ino_t inumber; /* referenced inode number */ - __uint8_t namelen; /* actual length of name (no NULL) */ - __uint8_t name[1]; /* name */ - } list[1]; /* variable sized array */ + xfs_dir_sf_hdr_t hdr; + xfs_dir_sf_entry_t list[1]; /* variable sized array */ } xfs_dir_shortform_t; -typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t; -typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t; /* * We generate this then sort it, so that readdirs are returned in diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h index b4c7f2bc55a0..00b1540f8108 100644 --- a/fs/xfs/xfs_dmapi.h +++ b/fs/xfs/xfs_dmapi.h @@ -191,14 +191,4 @@ typedef enum { extern struct bhv_vfsops xfs_dmops; -#ifdef CONFIG_XFS_DMAPI -void xfs_dm_init(struct file_system_type *); -void xfs_dm_exit(struct file_system_type *); -#define XFS_DM_INIT(fstype) xfs_dm_init(fstype) -#define XFS_DM_EXIT(fstype) xfs_dm_exit(fstype) -#else -#define XFS_DM_INIT(fstype) -#define XFS_DM_EXIT(fstype) -#endif - #endif /* __XFS_DMAPI_H__ */ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index b4d971b01588..56caa88713ab 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -462,6 +462,7 @@ xfs_fs_counts( { unsigned long s; + xfs_icsb_sync_counters_lazy(mp); s = XFS_SB_LOCK(mp); cnt->freedata = mp->m_sb.sb_fdblocks; cnt->freertx = mp->m_sb.sb_frextents; diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 8f3fae1aa98a..0024892841a3 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -138,8 +138,6 @@ xfs_ialloc_ag_alloc( int version; /* inode version number to use */ int isaligned; /* inode allocation at stripe unit */ /* boundary */ - xfs_dinode_core_t dic; /* a dinode_core to copy to new */ - /* inodes */ args.tp = tp; args.mp = tp->t_mountp; @@ -250,10 +248,6 @@ xfs_ialloc_ag_alloc( else version = XFS_DINODE_VERSION_1; - memset(&dic, 0, sizeof(xfs_dinode_core_t)); - INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); - INT_SET(dic.di_version, ARCH_CONVERT, version); - for (j = 0; j < nbufs; j++) { /* * Get the block. @@ -266,12 +260,13 @@ xfs_ialloc_ag_alloc( ASSERT(fbuf); ASSERT(!XFS_BUF_GETERROR(fbuf)); /* - * Loop over the inodes in this buffer. + * Set initial values for the inodes in this buffer. 
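+ * (Editor's note summarizing the code that follows: the buffer is
+ * zeroed in a single call to xfs_biozero() below, so only the
+ * non-zero fields, magic, version and next_unlinked, need to be set
+ * explicitly on each inode.)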
*/ - + xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); for (i = 0; i < ninodes; i++) { free = XFS_MAKE_IPTR(args.mp, fbuf, i); - memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); + INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); + INT_SET(free->di_core.di_version, ARCH_CONVERT, version); INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); xfs_ialloc_log_di(tp, fbuf, i, XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 8e380a1fb79b..3ce35a6f700b 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -258,7 +258,7 @@ again: goto finish_inode; } else if (vp != inode_vp) { - struct inode *inode = LINVFS_GET_IP(inode_vp); + struct inode *inode = vn_to_inode(inode_vp); /* The inode is being torn down, pause and * try again. @@ -495,7 +495,7 @@ retry: if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { xfs_inode_t *ip; - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); if (inode->i_state & I_NEW) { vn_initialize(inode); error = xfs_iget_core(vp, mp, tp, ino, flags, @@ -617,7 +617,7 @@ xfs_iput_new(xfs_inode_t *ip, uint lock_flags) { vnode_t *vp = XFS_ITOV(ip); - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1d7f5a7e063e..88a517fad07b 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -76,16 +76,18 @@ STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); */ STATIC void xfs_validate_extents( - xfs_bmbt_rec_t *ep, + xfs_ifork_t *ifp, int nrecs, int disk, xfs_exntfmt_t fmt) { + xfs_bmbt_rec_t *ep; xfs_bmbt_irec_t irec; xfs_bmbt_rec_t rec; int i; for (i = 0; i < nrecs; i++) { + ep = xfs_iext_get_ext(ifp, i); rec.l0 = get_unaligned((__uint64_t*)&ep->l0); rec.l1 = get_unaligned((__uint64_t*)&ep->l1); if (disk) @@ -94,11 +96,10 @@ xfs_validate_extents( xfs_bmbt_get_all(&rec, &irec); if (fmt == XFS_EXTFMT_NOSTATE) ASSERT(irec.br_state == XFS_EXT_NORM); - ep++; } } #else /* DEBUG */ -#define xfs_validate_extents(ep, nrecs, disk, fmt) +#define xfs_validate_extents(ifp, nrecs, disk, fmt) #endif /* DEBUG */ /* @@ -252,7 +253,8 @@ xfs_itobp( xfs_inode_t *ip, xfs_dinode_t **dipp, xfs_buf_t **bpp, - xfs_daddr_t bno) + xfs_daddr_t bno, + uint imap_flags) { xfs_buf_t *bp; int error; @@ -268,10 +270,9 @@ xfs_itobp( * inode on disk. */ imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); - if (error != 0) { + if ((error = xfs_imap(mp, tp, ip->i_ino, &imap, + XFS_IMAP_LOOKUP | imap_flags))) return error; - } /* * If the inode number maps to a block outside the bounds @@ -335,9 +336,10 @@ xfs_itobp( * (if DEBUG kernel) or the first inode in the buffer, otherwise. */ #ifdef DEBUG - ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; + ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : + (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); #else - ni = 1; + ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 
0 : 1; #endif for (i = 0; i < ni; i++) { int di_ok; @@ -504,7 +506,7 @@ xfs_iformat( switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); - size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); + size = be16_to_cpu(atp->hdr.totsize); error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: @@ -597,7 +599,6 @@ xfs_iformat_extents( xfs_bmbt_rec_t *ep, *dp; xfs_ifork_t *ifp; int nex; - int real_size; int size; int i; @@ -619,23 +620,20 @@ xfs_iformat_extents( return XFS_ERROR(EFSCORRUPTED); } - real_size = 0; + ifp->if_real_bytes = 0; if (nex == 0) ifp->if_u1.if_extents = NULL; else if (nex <= XFS_INLINE_EXTS) ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - else { - ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); - ASSERT(ifp->if_u1.if_extents != NULL); - real_size = size; - } + else + xfs_iext_add(ifp, 0, nex); + ifp->if_bytes = size; - ifp->if_real_bytes = real_size; if (size) { dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); - xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); - ep = ifp->if_u1.if_extents; - for (i = 0; i < nex; i++, ep++, dp++) { + xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip)); + for (i = 0; i < nex; i++, dp++) { + ep = xfs_iext_get_ext(ifp, i); ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), ARCH_CONVERT); ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), @@ -646,7 +644,7 @@ xfs_iformat_extents( if (whichfork != XFS_DATA_FORK || XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) if (unlikely(xfs_check_nostate_extents( - ifp->if_u1.if_extents, nex))) { + ifp, 0, nex))) { XFS_ERROR_REPORT("xfs_iformat_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); @@ -871,9 +869,8 @@ xfs_iread( * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will * know that this is a new incore inode. 
*/ - error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); - - if (error != 0) { + error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0); + if (error) { kmem_zone_free(xfs_inode_zone, ip); return error; } @@ -1015,6 +1012,7 @@ xfs_iread_extents( { int error; xfs_ifork_t *ifp; + xfs_extnum_t nextents; size_t size; if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { @@ -1022,26 +1020,24 @@ xfs_iread_extents( ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } - size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); + nextents = XFS_IFORK_NEXTENTS(ip, whichfork); + size = nextents * sizeof(xfs_bmbt_rec_t); ifp = XFS_IFORK_PTR(ip, whichfork); + /* * We know that the size is valid (it's checked in iformat_btree) */ - ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); - ASSERT(ifp->if_u1.if_extents != NULL); ifp->if_lastex = NULLEXTNUM; - ifp->if_bytes = ifp->if_real_bytes = (int)size; + ifp->if_bytes = ifp->if_real_bytes = 0; ifp->if_flags |= XFS_IFEXTENTS; + xfs_iext_add(ifp, 0, nextents); error = xfs_bmap_read_extents(tp, ip, whichfork); if (error) { - kmem_free(ifp->if_u1.if_extents, size); - ifp->if_u1.if_extents = NULL; - ifp->if_bytes = ifp->if_real_bytes = 0; + xfs_iext_destroy(ifp); ifp->if_flags &= ~XFS_IFEXTENTS; return error; } - xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, - XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip)); return 0; } @@ -1376,10 +1372,10 @@ xfs_itrunc_trace( (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), (void*)(unsigned long)(toss_finish & 0xffffffff), (void*)(unsigned long)current_cpu(), - (void*)0, - (void*)0, - (void*)0, - (void*)0); + (void*)(unsigned long)current_pid(), + (void*)NULL, + (void*)NULL, + (void*)NULL); } #else #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) @@ -1397,6 +1393,16 @@ xfs_itrunc_trace( * calling into the buffer/page cache code and we can't hold the * inode lock when we do so. * + * We need to wait for any direct I/Os in flight to complete before we + * proceed with the truncate. This is needed to prevent the extents + * being read or written by the direct I/Os from being removed while the + * I/O is in flight as there is no other method of synchronising + * direct I/O with the truncate operation. Also, because we hold + * the IOLOCK in exclusive mode, we prevent new direct I/Os from being + * started until the truncate completes and drops the lock. Essentially, + * the vn_iowait() call forms an I/O barrier that provides strict ordering + * between direct I/Os and the truncate operation. + * * The flags parameter can have either the value XFS_ITRUNC_DEFINITE * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used * in the case that the caller is locking things out of order and @@ -1424,6 +1430,9 @@ xfs_itruncate_start( mp = ip->i_mount; vp = XFS_ITOV(ip); + + vn_iowait(vp); /* wait for the completion of any pending DIOs */ + /* * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers * overlapping the region being removed. We have to use @@ -1899,7 +1908,7 @@ xfs_iunlink( * Here we put the head pointer into our next pointer, * and then we fall through to point the head at us. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { return error; } @@ -2008,7 +2017,7 @@ xfs_iunlink_remove( * of dealing with the buffer when there is no need to * change it. 
*/ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2070,7 +2079,7 @@ xfs_iunlink_remove( * Now last_ibp points to the buffer previous to us on * the unlinked list. Pull us from the list. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2476,92 +2485,6 @@ xfs_iroot_realloc( /* - * This is called when the amount of space needed for if_extents - * is increased or decreased. The change in size is indicated by - * the number of extents that need to be added or deleted in the - * ext_diff parameter. - * - * If the amount of space needed has decreased below the size of the - * inline buffer, then switch to using the inline buffer. Otherwise, - * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer - * to what is needed. - * - * ip -- the inode whose if_extents area is changing - * ext_diff -- the change in the number of extents, positive or negative, - * requested for the if_extents array. - */ -void -xfs_iext_realloc( - xfs_inode_t *ip, - int ext_diff, - int whichfork) -{ - int byte_diff; - xfs_ifork_t *ifp; - int new_size; - uint rnew_size; - - if (ext_diff == 0) { - return; - } - - ifp = XFS_IFORK_PTR(ip, whichfork); - byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t); - new_size = (int)ifp->if_bytes + byte_diff; - ASSERT(new_size >= 0); - - if (new_size == 0) { - if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { - ASSERT(ifp->if_real_bytes != 0); - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); - } - ifp->if_u1.if_extents = NULL; - rnew_size = 0; - } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) { - /* - * If the valid extents can fit in if_inline_ext, - * copy them from the malloc'd vector and free it. - */ - if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { - /* - * For now, empty files are format EXTENTS, - * so the if_extents pointer is null. - */ - if (ifp->if_u1.if_extents) { - memcpy(ifp->if_u2.if_inline_ext, - ifp->if_u1.if_extents, new_size); - kmem_free(ifp->if_u1.if_extents, - ifp->if_real_bytes); - } - ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - } - rnew_size = 0; - } else { - rnew_size = new_size; - if ((rnew_size & (rnew_size - 1)) != 0) - rnew_size = xfs_iroundup(rnew_size); - /* - * Stuck with malloc/realloc. - */ - if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_alloc(rnew_size, KM_SLEEP); - memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, - sizeof(ifp->if_u2.if_inline_ext)); - } else if (rnew_size != ifp->if_real_bytes) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_realloc(ifp->if_u1.if_extents, - rnew_size, - ifp->if_real_bytes, - KM_NOFS); - } - } - ifp->if_real_bytes = rnew_size; - ifp->if_bytes = new_size; -} - - -/* * This is called when the amount of space needed for if_data * is increased or decreased. 
The change in size is indicated by * the number of bytes that need to be added or deleted in the @@ -2720,12 +2643,11 @@ xfs_idestroy_fork( ifp->if_real_bytes = 0; } } else if ((ifp->if_flags & XFS_IFEXTENTS) && - (ifp->if_u1.if_extents != NULL) && - (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) { + ((ifp->if_flags & XFS_IFEXTIREC) || + ((ifp->if_u1.if_extents != NULL) && + (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { ASSERT(ifp->if_real_bytes != 0); - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); - ifp->if_u1.if_extents = NULL; - ifp->if_real_bytes = 0; + xfs_iext_destroy(ifp); } ASSERT(ifp->if_u1.if_extents == NULL || ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); @@ -2814,7 +2736,7 @@ xfs_iunpin( /* make sync come back and flush this inode */ if (vp) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (!(inode->i_state & I_NEW)) mark_inode_dirty_sync(inode); @@ -2902,16 +2824,15 @@ xfs_iextents_copy( * the delayed ones. There must be at least one * non-delayed extent. */ - ep = ifp->if_u1.if_extents; dest_ep = buffer; copied = 0; for (i = 0; i < nrecs; i++) { + ep = xfs_iext_get_ext(ifp, i); start_block = xfs_bmbt_get_startblock(ep); if (ISNULLSTARTBLOCK(start_block)) { /* * It's a delayed allocation extent, so skip it. */ - ep++; continue; } @@ -2921,11 +2842,10 @@ xfs_iextents_copy( put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), (__uint64_t*)&dest_ep->l1); dest_ep++; - ep++; copied++; } ASSERT(copied != 0); - xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip)); return (copied * (uint)sizeof(xfs_bmbt_rec_t)); } @@ -2995,8 +2915,10 @@ xfs_iflush_fork( case XFS_DINODE_FMT_EXTENTS: ASSERT((ifp->if_flags & XFS_IFEXTENTS) || !(iip->ili_format.ilf_fields & extflag[whichfork])); - ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); - ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); + ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || + (ifp->if_bytes == 0)); + ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || + (ifp->if_bytes > 0)); if ((iip->ili_format.ilf_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); @@ -3114,8 +3036,8 @@ xfs_iflush( /* * Get the buffer containing the on-disk inode. */ - error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); - if (error != 0) { + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0); + if (error) { xfs_ifunlock(ip); return error; } @@ -3610,7 +3532,7 @@ xfs_iaccess( { int error; mode_t orgmode = mode; - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); if (mode & S_IWUSR) { umode_t imode = inode->i_mode; @@ -3704,3 +3626,1100 @@ xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); } #endif + +/* + * Return a pointer to the extent record at file index idx. 
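+ * + * A minimal usage sketch (editor's illustration, not part of the + * original patch): walking every record of a fork, whatever its + * current representation, looks like + * + * nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + * for (i = 0; i < nextents; i++) { + * ep = xfs_iext_get_ext(ifp, i); + * ... use ep ... + * }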
+ */ +xfs_bmbt_rec_t * +xfs_iext_get_ext( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx) /* index of target extent */ +{ + ASSERT(idx >= 0); + if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { + return ifp->if_u1.if_ext_irec->er_extbuf; + } else if (ifp->if_flags & XFS_IFEXTIREC) { + xfs_ext_irec_t *erp; /* irec pointer */ + int erp_idx = 0; /* irec index */ + xfs_extnum_t page_idx = idx; /* ext index in target list */ + + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); + return &erp->er_extbuf[page_idx]; + } else if (ifp->if_bytes) { + return &ifp->if_u1.if_extents[idx]; + } else { + return NULL; + } +} + +/* + * Insert new item(s) into the extent records for incore inode + * fork 'ifp'. 'count' new items are inserted at index 'idx'. + */ +void +xfs_iext_insert( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new) /* items to insert */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t i; /* extent record index */ + + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + xfs_iext_add(ifp, idx, count); + for (i = idx; i < idx + count; i++, new++) { + ep = xfs_iext_get_ext(ifp, i); + xfs_bmbt_set_all(ep, new); + } +} + +/* + * This is called when the amount of space required for incore file + * extents needs to be increased. The ext_diff parameter stores the + * number of new extents being added and the idx parameter contains + * the extent index where the new extents will be added. If the new + * extents are being appended, then we just need to (re)allocate and + * initialize the space. Otherwise, if the new extents are being + * inserted into the middle of the existing entries, a bit more work + * is required to make room for the new extents to be inserted. The + * caller is responsible for filling in the new extent entries upon + * return. + */ +void +xfs_iext_add( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin adding exts */ + int ext_diff) /* number of extents to add */ +{ + int byte_diff; /* new bytes being added */ + int new_size; /* size of extents after adding */ + xfs_extnum_t nextents; /* number of extents in file */ + + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT((idx >= 0) && (idx <= nextents)); + byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); + new_size = ifp->if_bytes + byte_diff; + /* + * If the new number of extents (nextents + ext_diff) + * fits inside the inode, then continue to use the inline + * extent buffer. + */ + if (nextents + ext_diff <= XFS_INLINE_EXTS) { + if (idx < nextents) { + memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], + &ifp->if_u2.if_inline_ext[idx], + (nextents - idx) * sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); + } + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + ifp->if_real_bytes = 0; + ifp->if_lastex = nextents + ext_diff; + } + /* + * Otherwise use a linear (direct) extent list. + * If the extents are currently inside the inode, + * xfs_iext_realloc_direct will switch us from + * inline to direct extent allocation mode.
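+ * + * Editor's illustration, assuming the limits used elsewhere in this + * patch (XFS_INLINE_EXTS records inline in the inode, XFS_LINEAR_EXTS + * records per XFS_IEXT_BUFSZ buffer): a fork that outgrows + * XFS_INLINE_EXTS moves to a direct list, and one that outgrows + * XFS_LINEAR_EXTS moves to the indirection array handled below.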
+ */ + else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { + xfs_iext_realloc_direct(ifp, new_size); + if (idx < nextents) { + memmove(&ifp->if_u1.if_extents[idx + ext_diff], + &ifp->if_u1.if_extents[idx], + (nextents - idx) * sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); + } + } + /* Indirection array */ + else { + xfs_ext_irec_t *erp; + int erp_idx = 0; + int page_idx = idx; + + ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); + if (ifp->if_flags & XFS_IFEXTIREC) { + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); + } else { + xfs_iext_irec_init(ifp); + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = ifp->if_u1.if_ext_irec; + } + /* Extents fit in target extent page */ + if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { + if (page_idx < erp->er_extcount) { + memmove(&erp->er_extbuf[page_idx + ext_diff], + &erp->er_extbuf[page_idx], + (erp->er_extcount - page_idx) * + sizeof(xfs_bmbt_rec_t)); + memset(&erp->er_extbuf[page_idx], 0, byte_diff); + } + erp->er_extcount += ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + } + /* Insert a new extent page */ + else if (erp) { + xfs_iext_add_indirect_multi(ifp, + erp_idx, page_idx, ext_diff); + } + /* + * If extent(s) are being appended to the last page in + * the indirection array and the new extent(s) don't fit + * in the page, then erp is NULL and erp_idx is set to + * the next index needed in the indirection array. + */ + else { + int count = ext_diff; + + while (count) { + erp = xfs_iext_irec_new(ifp, erp_idx); + erp->er_extcount = count; + count -= MIN(count, (int)XFS_LINEAR_EXTS); + if (count) { + erp_idx++; + } + } + } + } + ifp->if_bytes = new_size; +} + +/* + * This is called when incore extents are being added to the indirection + * array and the new extents do not fit in the target extent list. The + * erp_idx parameter contains the irec index for the target extent list + * in the indirection array, and the idx parameter contains the extent + * index within the list. The number of extents being added is stored + * in the count parameter. 
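+ * + * Editor's worked example (assuming XFS_LINEAR_EXTS == 256): inserting + * count = 10 extents at idx = 250 of a full 256-record list saves the + * nex2 = 6 trailing records, fills the list back to 256 with the first + * 6 new extents, allocates a fresh irec for the remaining 4, and then + * appends the 6 saved records after them, leaving lists of 256 and 10 + * records.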
+ * + * |-------| |-------| + * | | | | idx - number of extents before idx + * | idx | | count | + * | | | | count - number of extents being inserted at idx + * |-------| |-------| + * | count | | nex2 | nex2 - number of extents after idx + count + * |-------| |-------| + */ +void +xfs_iext_add_indirect_multi( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx, /* target extent irec index */ + xfs_extnum_t idx, /* index within target list */ + int count) /* new extents being added */ +{ + int byte_diff; /* new bytes being added */ + xfs_ext_irec_t *erp; /* pointer to irec entry */ + xfs_extnum_t ext_diff; /* number of extents to add */ + xfs_extnum_t ext_cnt; /* new extents still needed */ + xfs_extnum_t nex2; /* extents after idx + count */ + xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ + int nlists; /* number of irec's (lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + nex2 = erp->er_extcount - idx; + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + + /* + * Save second part of target extent list + * (all extents past the insertion point). + */ + if (nex2) { + byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); + nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); + memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); + erp->er_extcount -= nex2; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); + memset(&erp->er_extbuf[idx], 0, byte_diff); + } + + /* + * Add the new extents to the end of the target + * list, then allocate new irec record(s) and + * extent buffer(s) as needed to store the rest + * of the new extents. + */ + ext_cnt = count; + ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); + if (ext_diff) { + erp->er_extcount += ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + ext_cnt -= ext_diff; + } + while (ext_cnt) { + erp_idx++; + erp = xfs_iext_irec_new(ifp, erp_idx); + ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); + erp->er_extcount = ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + ext_cnt -= ext_diff; + } + + /* Add nex2 extents back to indirection array */ + if (nex2) { + xfs_extnum_t ext_avail; + int i; + + byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); + ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; + i = 0; + /* + * If nex2 extents fit in the current page, append + * nex2_ep after the new extents. + */ + if (nex2 <= ext_avail) { + i = erp->er_extcount; + } + /* + * Otherwise, check if space is available in the + * next page. + */ + else if ((erp_idx < nlists - 1) && + (nex2 <= (ext_avail = XFS_LINEAR_EXTS - + ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { + erp_idx++; + erp++; + /* Create a hole for nex2 extents */ + memmove(&erp->er_extbuf[nex2], erp->er_extbuf, + erp->er_extcount * sizeof(xfs_bmbt_rec_t)); + } + /* + * Final choice, create a new extent page for + * nex2 extents. + */ + else { + erp_idx++; + erp = xfs_iext_irec_new(ifp, erp_idx); + } + memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); + kmem_free(nex2_ep, byte_diff); + erp->er_extcount += nex2; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); + } +} + +/* + * This is called when the amount of space required for incore file + * extents needs to be decreased. The ext_diff parameter stores the + * number of extents to be removed and the idx parameter contains + * the extent index where the extents will be removed from. + * + * If the amount of space needed has decreased below the linear + * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous + * extent array.
Otherwise, use kmem_realloc() to adjust the + * size to what is needed. + */ +void +xfs_iext_remove( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + xfs_extnum_t nextents; /* number of extents in file */ + int new_size; /* size of extents after removal */ + + ASSERT(ext_diff > 0); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); + + if (new_size == 0) { + xfs_iext_destroy(ifp); + } else if (ifp->if_flags & XFS_IFEXTIREC) { + xfs_iext_remove_indirect(ifp, idx, ext_diff); + } else if (ifp->if_real_bytes) { + xfs_iext_remove_direct(ifp, idx, ext_diff); + } else { + xfs_iext_remove_inline(ifp, idx, ext_diff); + } + ifp->if_bytes = new_size; +} + +/* + * This removes ext_diff extents from the inline buffer, beginning + * at extent index idx. + */ +void +xfs_iext_remove_inline( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + int nextents; /* number of extents in file */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + ASSERT(idx < XFS_INLINE_EXTS); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(((nextents - ext_diff) > 0) && + (nextents - ext_diff) < XFS_INLINE_EXTS); + + if (idx + ext_diff < nextents) { + memmove(&ifp->if_u2.if_inline_ext[idx], + &ifp->if_u2.if_inline_ext[idx + ext_diff], + (nextents - (idx + ext_diff)) * + sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], + 0, ext_diff * sizeof(xfs_bmbt_rec_t)); + } else { + memset(&ifp->if_u2.if_inline_ext[idx], 0, + ext_diff * sizeof(xfs_bmbt_rec_t)); + } +} + +/* + * This removes ext_diff extents from a linear (direct) extent list, + * beginning at extent index idx. If the extents are being removed + * from the end of the list (ie. truncate) then we just need to re- + * allocate the list to remove the extra space. Otherwise, if the + * extents are being removed from the middle of the existing extent + * entries, then we first need to move the extent records beginning + * at idx + ext_diff up in the list to overwrite the records being + * removed, then remove the extra space via kmem_realloc. + */ +void +xfs_iext_remove_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + xfs_extnum_t nextents; /* number of extents in file */ + int new_size; /* size of extents after removal */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + new_size = ifp->if_bytes - + (ext_diff * sizeof(xfs_bmbt_rec_t)); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + + if (new_size == 0) { + xfs_iext_destroy(ifp); + return; + } + /* Move extents up in the list (if needed) */ + if (idx + ext_diff < nextents) { + memmove(&ifp->if_u1.if_extents[idx], + &ifp->if_u1.if_extents[idx + ext_diff], + (nextents - (idx + ext_diff)) * + sizeof(xfs_bmbt_rec_t)); + } + memset(&ifp->if_u1.if_extents[nextents - ext_diff], + 0, ext_diff * sizeof(xfs_bmbt_rec_t)); + /* + * Reallocate the direct extent list. If the extents + * will fit inside the inode then xfs_iext_realloc_direct + * will switch from direct to inline extent allocation + * mode for us. 
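+ * (Editor's illustration: after removing 3 of 10 extents, the + * memmove/memset above has already compacted the remaining 7 records, + * and this call shrinks the 256-byte buffer to the 128-byte + * power-of-two size that covers 7 * 16 = 112 bytes.)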
+ */ + xfs_iext_realloc_direct(ifp, new_size); + ifp->if_bytes = new_size; +} + +/* + * This is called when incore extents are being removed from the + * indirection array and the extents being removed span multiple extent + * buffers. The idx parameter contains the file extent index where we + * want to begin removing extents, and the count parameter contains + * how many extents need to be removed. + * + * |-------| |-------| + * | nex1 | | | nex1 - number of extents before idx + * |-------| | count | + * | | | | count - number of extents being removed at idx + * | count | |-------| + * | | | nex2 | nex2 - number of extents after idx + count + * |-------| |-------| + */ +void +xfs_iext_remove_indirect( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing extents */ + int count) /* number of extents to remove */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + int erp_idx = 0; /* indirection array index */ + xfs_extnum_t ext_cnt; /* extents left to remove */ + xfs_extnum_t ext_diff; /* extents to remove in current list */ + xfs_extnum_t nex1; /* number of extents before idx */ + xfs_extnum_t nex2; /* extents after idx + count */ + int nlists; /* entries in indirection array */ + int page_idx = idx; /* index in target extent list */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); + ASSERT(erp != NULL); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + nex1 = page_idx; + ext_cnt = count; + while (ext_cnt) { + nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); + ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); + /* + * Check for deletion of entire list; + * xfs_iext_irec_remove() updates extent offsets. + */ + if (ext_diff == erp->er_extcount) { + xfs_iext_irec_remove(ifp, erp_idx); + ext_cnt -= ext_diff; + nex1 = 0; + if (ext_cnt) { + ASSERT(erp_idx < ifp->if_real_bytes / + XFS_IEXT_BUFSZ); + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + nex1 = 0; + continue; + } else { + break; + } + } + /* Move extents up (if needed) */ + if (nex2) { + memmove(&erp->er_extbuf[nex1], + &erp->er_extbuf[nex1 + ext_diff], + nex2 * sizeof(xfs_bmbt_rec_t)); + } + /* Zero out rest of page */ + memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - + ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); + /* Update remaining counters */ + erp->er_extcount -= ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); + ext_cnt -= ext_diff; + nex1 = 0; + erp_idx++; + erp++; + } + ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); + xfs_iext_irec_compact(ifp); +} + +/* + * Create, destroy, or resize a linear (direct) block of extents.
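+ * + * Editor's note on sizing: non-power-of-two sizes are rounded up via + * xfs_iroundup(), so a request for 5 records (5 * 16 = 80 bytes) + * allocates 128 bytes, leaving slack for further growth.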
+ */ +void +xfs_iext_realloc_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* new size of extents */ +{ + int rnew_size; /* real new size of extents */ + + rnew_size = new_size; + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || + ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && + (new_size != ifp->if_real_bytes))); + + /* Free extent records */ + if (new_size == 0) { + xfs_iext_destroy(ifp); + } + /* Resize direct extent list and zero any new bytes */ + else if (ifp->if_real_bytes) { + /* Check if extents will fit inside the inode */ + if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { + xfs_iext_direct_to_inline(ifp, new_size / + (uint)sizeof(xfs_bmbt_rec_t)); + ifp->if_bytes = new_size; + return; + } + if ((new_size & (new_size - 1)) != 0) { + rnew_size = xfs_iroundup(new_size); + } + if (rnew_size != ifp->if_real_bytes) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_realloc(ifp->if_u1.if_extents, + rnew_size, + ifp->if_real_bytes, + KM_SLEEP); + } + if (rnew_size > ifp->if_real_bytes) { + memset(&ifp->if_u1.if_extents[ifp->if_bytes / + (uint)sizeof(xfs_bmbt_rec_t)], 0, + rnew_size - ifp->if_real_bytes); + } + } + /* + * Switch from the inline extent buffer to a direct + * extent list. Be sure to include the inline extent + * bytes in new_size. + */ + else { + new_size += ifp->if_bytes; + if ((new_size & (new_size - 1)) != 0) { + rnew_size = xfs_iroundup(new_size); + } + xfs_iext_inline_to_direct(ifp, rnew_size); + } + ifp->if_real_bytes = rnew_size; + ifp->if_bytes = new_size; +} + +/* + * Switch from linear (direct) extent records to inline buffer. + */ +void +xfs_iext_direct_to_inline( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t nextents) /* number of extents in file */ +{ + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ASSERT(nextents <= XFS_INLINE_EXTS); + /* + * The inline buffer was zeroed when we switched + * from inline to direct extent allocation mode, + * so we don't need to clear it here. + */ + memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, + nextents * sizeof(xfs_bmbt_rec_t)); + kmem_free(ifp->if_u1.if_extents, KM_SLEEP); + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + ifp->if_real_bytes = 0; +} + +/* + * Switch from inline buffer to linear (direct) extent records. + * new_size should already be rounded up to the next power of 2 + * by the caller (when appropriate), so use new_size as it is. + * However, since new_size may be rounded up, we can't update + * if_bytes here. It is the caller's responsibility to update + * if_bytes upon return. + */ +void +xfs_iext_inline_to_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* number of extents in file */ +{ + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(new_size, KM_SLEEP); + memset(ifp->if_u1.if_extents, 0, new_size); + if (ifp->if_bytes) { + memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, + ifp->if_bytes); + memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * + sizeof(xfs_bmbt_rec_t)); + } + ifp->if_real_bytes = new_size; +} + +/* + * Resize an extent indirection array to new_size bytes. 
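+ * + * Callers pass the total array size in bytes; xfs_iext_irec_new() + * below, for example, grows the array one entry at a time with + * xfs_iext_realloc_indirect(ifp, ++nlists * sizeof(xfs_ext_irec_t)) + * (editor's cross-reference).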
+ */ +void +xfs_iext_realloc_indirect( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* new indirection array size */ +{ + int nlists; /* number of irec's (ex lists) */ + int size; /* current indirection array size */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + size = nlists * sizeof(xfs_ext_irec_t); + ASSERT(ifp->if_real_bytes); + ASSERT((new_size >= 0) && (new_size != size)); + if (new_size == 0) { + xfs_iext_destroy(ifp); + } else { + ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) + kmem_realloc(ifp->if_u1.if_ext_irec, + new_size, size, KM_SLEEP); + } +} + +/* + * Switch from indirection array to linear (direct) extent allocations. + */ +void +xfs_iext_indirect_to_direct( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t nextents; /* number of extents in file */ + int size; /* size of file extents */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(nextents <= XFS_LINEAR_EXTS); + size = nextents * sizeof(xfs_bmbt_rec_t); + + xfs_iext_irec_compact_full(ifp); + ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); + + ep = ifp->if_u1.if_ext_irec->er_extbuf; + kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); + ifp->if_flags &= ~XFS_IFEXTIREC; + ifp->if_u1.if_extents = ep; + ifp->if_bytes = size; + if (nextents < XFS_LINEAR_EXTS) { + xfs_iext_realloc_direct(ifp, size); + } +} + +/* + * Free incore file extents. + */ +void +xfs_iext_destroy( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + if (ifp->if_flags & XFS_IFEXTIREC) { + int erp_idx; + int nlists; + + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { + xfs_iext_irec_remove(ifp, erp_idx); + } + ifp->if_flags &= ~XFS_IFEXTIREC; + } else if (ifp->if_real_bytes) { + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + } else if (ifp->if_bytes) { + memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * + sizeof(xfs_bmbt_rec_t)); + } + ifp->if_u1.if_extents = NULL; + ifp->if_real_bytes = 0; + ifp->if_bytes = 0; +} + +/* + * Return a pointer to the extent record for file system block bno. 
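+ * + * Editor's illustration: with records covering blocks [0,10) and + * [20,25), a search for bno 12 lands in the hole, so the binary search + * returns the following record ([20,25)) with *idxp = 1; a search + * beyond the last record returns NULL.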
+ */ +xfs_bmbt_rec_t * /* pointer to found extent record */ +xfs_iext_bno_to_ext( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number to search for */ + xfs_extnum_t *idxp) /* index of target extent */ +{ + xfs_bmbt_rec_t *base; /* pointer to first extent */ + xfs_filblks_t blockcount = 0; /* number of blocks in extent */ + xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */ + xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ + int high; /* upper boundary in search */ + xfs_extnum_t idx = 0; /* index of target extent */ + int low; /* lower boundary in search */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_fileoff_t startoff = 0; /* start offset of extent */ + + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *idxp = 0; + return NULL; + } + low = 0; + if (ifp->if_flags & XFS_IFEXTIREC) { + /* Find target extent list */ + int erp_idx = 0; + erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); + base = erp->er_extbuf; + high = erp->er_extcount - 1; + } else { + base = ifp->if_u1.if_extents; + high = nextents - 1; + } + /* Binary search extent records */ + while (low <= high) { + idx = (low + high) >> 1; + ep = base + idx; + startoff = xfs_bmbt_get_startoff(ep); + blockcount = xfs_bmbt_get_blockcount(ep); + if (bno < startoff) { + high = idx - 1; + } else if (bno >= startoff + blockcount) { + low = idx + 1; + } else { + /* Convert back to file-based extent index */ + if (ifp->if_flags & XFS_IFEXTIREC) { + idx += erp->er_extoff; + } + *idxp = idx; + return ep; + } + } + /* Convert back to file-based extent index */ + if (ifp->if_flags & XFS_IFEXTIREC) { + idx += erp->er_extoff; + } + if (bno >= startoff + blockcount) { + if (++idx == nextents) { + ep = NULL; + } else { + ep = xfs_iext_get_ext(ifp, idx); + } + } + *idxp = idx; + return ep; +} + +/* + * Return a pointer to the indirection array entry containing the + * extent record for filesystem block bno. Store the index of the + * target irec in *erp_idxp. + */ +xfs_ext_irec_t * /* pointer to found extent record */ +xfs_iext_bno_to_irec( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number to search for */ + int *erp_idxp) /* irec index of target ext list */ +{ + xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ + xfs_ext_irec_t *erp_next; /* next indirection array entry */ + int erp_idx; /* indirection array index */ + int nlists; /* number of extent irec's (lists) */ + int high; /* binary search upper limit */ + int low; /* binary search lower limit */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp_idx = 0; + low = 0; + high = nlists - 1; + while (low <= high) { + erp_idx = (low + high) >> 1; + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL; + if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { + high = erp_idx - 1; + } else if (erp_next && bno >= + xfs_bmbt_get_startoff(erp_next->er_extbuf)) { + low = erp_idx + 1; + } else { + break; + } + } + *erp_idxp = erp_idx; + return erp; +} + +/* + * Return a pointer to the indirection array entry containing the + * extent record at file extent index *idxp. Store the index of the + * target irec in *erp_idxp and store the page index of the target + * extent record in *idxp.
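+ * + * Editor's illustration (assuming full 256-record lists): file extent + * index 300 resolves to *erp_idxp = 1 and *idxp = 44, i.e. record 44 + * of the second list, since the first list holds extents 0..255.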
+ */ +xfs_ext_irec_t * +xfs_iext_idx_to_irec( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t *idxp, /* extent index (file -> page) */ + int *erp_idxp, /* pointer to target irec */ + int realloc) /* new bytes were just added */ +{ + xfs_ext_irec_t *prev; /* pointer to previous irec */ + xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ + int erp_idx; /* indirection array index */ + int nlists; /* number of irec's (ex lists) */ + int high; /* binary search upper limit */ + int low; /* binary search lower limit */ + xfs_extnum_t page_idx = *idxp; /* extent index in target list */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + ASSERT(page_idx >= 0 && page_idx <= + ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp_idx = 0; + low = 0; + high = nlists - 1; + + /* Binary search extent irec's */ + while (low <= high) { + erp_idx = (low + high) >> 1; + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + prev = erp_idx > 0 ? erp - 1 : NULL; + if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && + realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { + high = erp_idx - 1; + } else if (page_idx > erp->er_extoff + erp->er_extcount || + (page_idx == erp->er_extoff + erp->er_extcount && + !realloc)) { + low = erp_idx + 1; + } else if (page_idx == erp->er_extoff + erp->er_extcount && + erp->er_extcount == XFS_LINEAR_EXTS) { + ASSERT(realloc); + page_idx = 0; + erp_idx++; + erp = erp_idx < nlists ? erp + 1 : NULL; + break; + } else { + page_idx -= erp->er_extoff; + break; + } + } + *idxp = page_idx; + *erp_idxp = erp_idx; + return(erp); +} + +/* + * Allocate and initialize an indirection array once the space needed + * for incore extents increases above XFS_IEXT_BUFSZ. + */ +void +xfs_iext_irec_init( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + xfs_extnum_t nextents; /* number of extents in file */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(nextents <= XFS_LINEAR_EXTS); + + erp = (xfs_ext_irec_t *) + kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); + + if (nextents == 0) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); + } else if (!ifp->if_real_bytes) { + xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); + } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { + xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); + } + erp->er_extbuf = ifp->if_u1.if_extents; + erp->er_extcount = nextents; + erp->er_extoff = 0; + + ifp->if_flags |= XFS_IFEXTIREC; + ifp->if_real_bytes = XFS_IEXT_BUFSZ; + ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); + ifp->if_u1.if_ext_irec = erp; + + return; +} + +/* + * Allocate and initialize a new entry in the indirection array. + */ +xfs_ext_irec_t * +xfs_iext_irec_new( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx) /* index for new irec */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + int i; /* loop counter */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + + /* Resize indirection array */ + xfs_iext_realloc_indirect(ifp, ++nlists * + sizeof(xfs_ext_irec_t)); + /* + * Move records down in the array so the + * new page can use erp_idx. 
+
+/*
+ * Allocate and initialize a new entry in the indirection array.
+ */
+xfs_ext_irec_t *
+xfs_iext_irec_new(
+	xfs_ifork_t	*ifp,		/* inode fork pointer */
+	int		erp_idx)	/* index for new irec */
+{
+	xfs_ext_irec_t	*erp;		/* indirection array pointer */
+	int		i;		/* loop counter */
+	int		nlists;		/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+
+	/* Resize indirection array */
+	xfs_iext_realloc_indirect(ifp, ++nlists *
+				  sizeof(xfs_ext_irec_t));
+	/*
+	 * Move records down in the array so the
+	 * new page can use erp_idx.
+	 */
+	erp = ifp->if_u1.if_ext_irec;
+	for (i = nlists - 1; i > erp_idx; i--) {
+		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
+	}
+	ASSERT(i == erp_idx);
+
+	/* Initialize new extent record */
+	erp = ifp->if_u1.if_ext_irec;
+	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
+		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
+	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
+	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
+	erp[erp_idx].er_extcount = 0;
+	erp[erp_idx].er_extoff = erp_idx > 0 ?
+		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
+	return (&erp[erp_idx]);
+}
+
+/*
+ * Remove a record from the indirection array.
+ */
+void
+xfs_iext_irec_remove(
+	xfs_ifork_t	*ifp,		/* inode fork pointer */
+	int		erp_idx)	/* irec index to remove */
+{
+	xfs_ext_irec_t	*erp;		/* indirection array pointer */
+	int		i;		/* loop counter */
+	int		nlists;		/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	erp = &ifp->if_u1.if_ext_irec[erp_idx];
+	if (erp->er_extbuf) {
+		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
+			-erp->er_extcount);
+		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
+	}
+	/* Compact extent records */
+	erp = ifp->if_u1.if_ext_irec;
+	for (i = erp_idx; i < nlists - 1; i++) {
+		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
+	}
+	/*
+	 * Manually free the last extent record from the indirection
+	 * array.  A call to xfs_iext_realloc_indirect() with a size
+	 * of zero would result in a call to xfs_iext_destroy() which
+	 * would in turn call this function again, creating a nasty
+	 * infinite loop.
+	 */
+	if (--nlists) {
+		xfs_iext_realloc_indirect(ifp,
+			nlists * sizeof(xfs_ext_irec_t));
+	} else {
+		kmem_free(ifp->if_u1.if_ext_irec,
+			sizeof(xfs_ext_irec_t));
+	}
+	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
+}
+
+/*
+ * This is called to clean up large amounts of unused memory allocated
+ * by the indirection array.  Before compacting anything though, verify
+ * that the indirection array is still needed and switch back to the
+ * linear extent list (or even the inline buffer) if possible.  The
+ * compaction policy is as follows:
+ *
+ * Full Compaction: Extents fit into a single page (or inline buffer)
+ * Full Compaction: Extents occupy less than 12.5% (1/8) of allocated space
+ * Partial Compaction: Extents occupy between 12.5% and 50% of allocated space
+ * No Compaction: Extents occupy at least 50% of allocated space
+ */
+void
+xfs_iext_irec_compact(
+	xfs_ifork_t	*ifp)		/* inode fork pointer */
+{
+	xfs_extnum_t	nextents;	/* number of extents in file */
+	int		nlists;		/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+
+	if (nextents == 0) {
+		xfs_iext_destroy(ifp);
+	} else if (nextents <= XFS_INLINE_EXTS) {
+		xfs_iext_indirect_to_direct(ifp);
+		xfs_iext_direct_to_inline(ifp, nextents);
+	} else if (nextents <= XFS_LINEAR_EXTS) {
+		xfs_iext_indirect_to_direct(ifp);
+	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
+		xfs_iext_irec_compact_full(ifp);
+	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
+		xfs_iext_irec_compact_pages(ifp);
+	}
+}
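The policy in xfs_iext_irec_compact() reads as a pure decision function over the extent count and the space currently allocated — the >> 3 and >> 1 shifts are 1/8 and 1/2 of capacity. A sketch under the same thresholds; the constants and names are placeholders:

	enum compact_action {
		C_DESTROY, C_TO_INLINE, C_TO_DIRECT, C_FULL, C_PAGES, C_NONE
	};

	static enum compact_action
	compact_policy(int nextents, int nlists, int exts_per_page, int inline_exts)
	{
		if (nextents == 0)
			return C_DESTROY;	/* no extents left at all */
		if (nextents <= inline_exts)
			return C_TO_INLINE;	/* fits back in the inode */
		if (nextents <= exts_per_page)
			return C_TO_DIRECT;	/* fits in one linear buffer */
		if (nextents < (nlists * exts_per_page) >> 3)
			return C_FULL;		/* < 1/8 used: repack everything */
		if (nextents < (nlists * exts_per_page) >> 1)
			return C_PAGES;		/* < 1/2 used: merge neighbors */
		return C_NONE;
	}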
+
+/*
+ * Combine extents from neighboring extent pages.
+ */
+void
+xfs_iext_irec_compact_pages(
+	xfs_ifork_t	*ifp)		/* inode fork pointer */
+{
+	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
+	int		erp_idx = 0;	/* indirection array index */
+	int		nlists;		/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	while (erp_idx < nlists - 1) {
+		erp = &ifp->if_u1.if_ext_irec[erp_idx];
+		erp_next = erp + 1;
+		if (erp_next->er_extcount <=
+		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
+			memmove(&erp->er_extbuf[erp->er_extcount],
+				erp_next->er_extbuf, erp_next->er_extcount *
+				sizeof(xfs_bmbt_rec_t));
+			erp->er_extcount += erp_next->er_extcount;
+			/*
+			 * Free page before removing extent record
+			 * so er_extoffs don't get modified in
+			 * xfs_iext_irec_remove.
+			 */
+			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
+			erp_next->er_extbuf = NULL;
+			xfs_iext_irec_remove(ifp, erp_idx + 1);
+			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+		} else {
+			erp_idx++;
+		}
+	}
+}
+
+/*
+ * Fully compact the extent records managed by the indirection array.
+ */
+void
+xfs_iext_irec_compact_full(
+	xfs_ifork_t	*ifp)			/* inode fork pointer */
+{
+	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
+	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
+	int		erp_idx = 0;		/* extent irec index */
+	int		ext_avail;		/* empty entries in ex list */
+	int		ext_diff;		/* number of exts to add */
+	int		nlists;			/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	erp = ifp->if_u1.if_ext_irec;
+	ep = &erp->er_extbuf[erp->er_extcount];
+	erp_next = erp + 1;
+	ep_next = erp_next->er_extbuf;
+	while (erp_idx < nlists - 1) {
+		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
+		ext_diff = MIN(ext_avail, erp_next->er_extcount);
+		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
+		erp->er_extcount += ext_diff;
+		erp_next->er_extcount -= ext_diff;
+		/* Remove next page */
+		if (erp_next->er_extcount == 0) {
+			/*
+			 * Free page before removing extent record
+			 * so er_extoffs don't get modified in
+			 * xfs_iext_irec_remove.  The buffer is always
+			 * XFS_IEXT_BUFSZ bytes, regardless of how many
+			 * records it holds (er_extcount is zero here).
+			 */
+			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
+			erp_next->er_extbuf = NULL;
+			xfs_iext_irec_remove(ifp, erp_idx + 1);
+			erp = &ifp->if_u1.if_ext_irec[erp_idx];
+			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+		/* Update next page */
+		} else {
+			/* Move rest of page up to become next new page */
+			memmove(erp_next->er_extbuf, ep_next,
+				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
+			ep_next = erp_next->er_extbuf;
+			memset(&ep_next[erp_next->er_extcount], 0,
+				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
+				sizeof(xfs_bmbt_rec_t));
+		}
+		if (erp->er_extcount == XFS_LINEAR_EXTS) {
+			erp_idx++;
+			if (erp_idx < nlists)
+				erp = &ifp->if_u1.if_ext_irec[erp_idx];
+			else
+				break;
+		}
+		ep = &erp->er_extbuf[erp->er_extcount];
+		erp_next = erp + 1;
+		ep_next = erp_next->er_extbuf;
+	}
+}
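xfs_iext_irec_compact_pages() only merges a following buffer into its predecessor when the whole buffer fits, so no record is ever split across a boundary during this pass. A minimal sketch of that test-and-merge step, with simplified fixed-size pages (the 256-records-per-page figure assumes 16-byte records):

	#include <string.h>

	#define PAGE_EXTS 256	/* assumed records per 4k buffer */

	struct ext { unsigned long long l0, l1; };	/* stand-in record */
	struct extpage {
		int count;
		struct ext recs[PAGE_EXTS];
	};

	/* Move src's records onto the end of dst iff all of them fit. */
	static int
	merge_pages(struct extpage *dst, struct extpage *src)
	{
		if (src->count > PAGE_EXTS - dst->count)
			return 0;	/* leave both for the full-compaction pass */
		memcpy(&dst->recs[dst->count], src->recs,
		       src->count * sizeof(struct ext));
		dst->count += src->count;
		src->count = 0;		/* src page can now be freed */
		return 1;
	}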
+
+/*
+ * This is called to update the er_extoff field in the indirection
+ * array when extents have been added or removed from one of the
+ * extent lists. erp_idx contains the irec index to begin updating
+ * at and ext_diff contains the number of extents that were added
+ * or removed.
+ */
+void
+xfs_iext_irec_update_extoffs(
+	xfs_ifork_t	*ifp,		/* inode fork pointer */
+	int		erp_idx,	/* irec index to update */
+	int		ext_diff)	/* number of new extents */
+{
+	int		i;		/* loop counter */
+	int		nlists;		/* number of irec's (ex lists) */
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	for (i = erp_idx; i < nlists; i++) {
+		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
+	}
+}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1cfbcf18ce86..39ef9c36ea55 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -25,10 +25,37 @@
 #define	XFS_ATTR_FORK	1
 
 /*
+ * The following xfs_ext_irec_t struct introduces a second (top) level
+ * to the in-core extent allocation scheme. These structs are allocated
+ * in a contiguous block, creating an indirection array where each entry
+ * (irec) contains a pointer to a buffer of in-core extent records which
+ * it manages. Each extent buffer is 4k in size, since 4k is the system
+ * page size on Linux i386 and systems with larger page sizes don't seem
+ * to gain much, if anything, by using their native page size as the
+ * extent buffer size. Also, using 4k extent buffers everywhere provides
+ * a consistent interface for CXFS across different platforms.
+ *
+ * There is currently no limit on the number of irec's (extent lists)
+ * allowed, so heavily fragmented files may require an indirection array
+ * which spans multiple system pages of memory. The number of extents
+ * which would require this amount of contiguous memory is very large
+ * and should not cause problems in the foreseeable future. However,
+ * if the memory needed for the contiguous array ever becomes a problem,
+ * it is possible that a third level of indirection may be required.
+ */
+typedef struct xfs_ext_irec {
+	xfs_bmbt_rec_t	*er_extbuf;	/* block of extent records */
+	xfs_extnum_t	er_extoff;	/* extent offset in file */
+	xfs_extnum_t	er_extcount;	/* number of extents in page/block */
+} xfs_ext_irec_t;
+
+/*
  * File incore extent information, present for each of data & attr forks.
  */
-#define	XFS_INLINE_EXTS	2
-#define	XFS_INLINE_DATA	32
+#define	XFS_IEXT_BUFSZ		4096
+#define	XFS_LINEAR_EXTS		(XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
+#define	XFS_INLINE_EXTS		2
+#define	XFS_INLINE_DATA		32
 typedef struct xfs_ifork {
 	int			if_bytes;	/* bytes in if_u1 */
 	int			if_real_bytes;	/* bytes allocated in if_u1 */
@@ -39,6 +66,7 @@ typedef struct xfs_ifork {
 	xfs_extnum_t		if_lastex;	/* last if_extents used */
 	union {
 		xfs_bmbt_rec_t	*if_extents;	/* linear map file exts */
+		xfs_ext_irec_t	*if_ext_irec;	/* irec map file exts */
 		char		*if_data;	/* inline file data */
 	} if_u1;
 	union {
@@ -61,20 +89,16 @@ typedef struct xfs_ifork {
 /*
  * Per-fork incore inode flags.
  */
-#define	XFS_IFINLINE	0x0001	/* Inline data is read in */
-#define	XFS_IFEXTENTS	0x0002	/* All extent pointers are read in */
-#define	XFS_IFBROOT	0x0004	/* i_broot points to the bmap b-tree root */
+#define	XFS_IFINLINE	0x01	/* Inline data is read in */
+#define	XFS_IFEXTENTS	0x02	/* All extent pointers are read in */
+#define	XFS_IFBROOT	0x04	/* i_broot points to the bmap b-tree root */
+#define	XFS_IFEXTIREC	0x08	/* Indirection array of extent blocks */
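Some arithmetic behind the "very large" claim in the indirection-array comment in this header. The 16-byte record size matches two 64-bit words; the 24-byte irec size (pointer plus two 32-bit counts, padded on 64-bit) is an assumption used only for this estimate:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t nextents = 1000000;	/* pathologically fragmented file */
		uint64_t per_buf = 4096 / 16;	/* XFS_LINEAR_EXTS: 256 records */
		uint64_t nlists = (nextents + per_buf - 1) / per_buf;

		/* ~3907 irecs * 24 bytes ~= 92KB of contiguous array */
		printf("%llu extents -> %llu irecs -> %llu contiguous bytes\n",
		       (unsigned long long)nextents,
		       (unsigned long long)nlists,
		       (unsigned long long)(nlists * 24));
		return 0;
	}

Even a million extents needs well under a hundred kilobytes of contiguous indirection array, which is why the third indirection level is deferred.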
 
 /*
- * Flags for xfs_imap() and xfs_dilocate().
+ * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate().
  */
-#define	XFS_IMAP_LOOKUP	0x1
-
-/*
- * Maximum number of extent pointers in if_u1.if_extents.
- */
-#define	XFS_MAX_INCORE_EXTENTS	32768
-
+#define	XFS_IMAP_LOOKUP		0x1
+#define	XFS_IMAP_BULKSTAT	0x2
 
 #ifdef __KERNEL__
 struct bhv_desc;
@@ -398,7 +422,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int);
  */
 int		xfs_itobp(struct xfs_mount *, struct xfs_trans *,
			  xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **,
-			  xfs_daddr_t);
+			  xfs_daddr_t, uint);
 int		xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
			  xfs_inode_t **, xfs_daddr_t);
 int		xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
@@ -440,6 +464,32 @@ xfs_inode_t	*xfs_vtoi(struct vnode *vp);
 
 void		xfs_synchronize_atime(xfs_inode_t *);
 
+xfs_bmbt_rec_t	*xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
+void		xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
+				xfs_bmbt_irec_t *);
+void		xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
+void		xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
+void		xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int);
+void		xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
+void		xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
+void		xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
+void		xfs_iext_realloc_direct(xfs_ifork_t *, int);
+void		xfs_iext_realloc_indirect(xfs_ifork_t *, int);
+void		xfs_iext_indirect_to_direct(xfs_ifork_t *);
+void		xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t);
+void		xfs_iext_inline_to_direct(xfs_ifork_t *, int);
+void		xfs_iext_destroy(xfs_ifork_t *);
+xfs_bmbt_rec_t	*xfs_iext_bno_to_ext(xfs_ifork_t *, xfs_fileoff_t, xfs_extnum_t *);
+xfs_ext_irec_t	*xfs_iext_bno_to_irec(xfs_ifork_t *, xfs_fileoff_t, int *);
+xfs_ext_irec_t	*xfs_iext_idx_to_irec(xfs_ifork_t *, xfs_extnum_t *, int *, int);
+void		xfs_iext_irec_init(xfs_ifork_t *);
+xfs_ext_irec_t	*xfs_iext_irec_new(xfs_ifork_t *, int);
+void		xfs_iext_irec_remove(xfs_ifork_t *, int);
+void		xfs_iext_irec_compact(xfs_ifork_t *);
+void		xfs_iext_irec_compact_pages(xfs_ifork_t *);
+void		xfs_iext_irec_compact_full(xfs_ifork_t *);
+void		xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int);
+
 #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
 
 #ifdef DEBUG
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 788917f355c4..d5dfedcb8922 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -76,7 +76,7 @@ xfs_iomap_enter_trace(
		(void *)((unsigned long)count),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
-		(void *)NULL,
+		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c59450e1be40..32247b6bfee7 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -562,7 +562,8 @@ xfs_bulkstat(
				if (bp)
					xfs_buf_relse(bp);
				error = xfs_itobp(mp, NULL, ip,
-						&dip, &bp, bno);
+						&dip, &bp, bno,
+						XFS_IMAP_BULKSTAT);
				if (!error)
					clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
				kmem_zone_free(xfs_inode_zone, ip);
@@ -570,6 +571,8 @@ xfs_bulkstat(
						mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
						XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
					bp = NULL;
+					ubleft = 0;
+					rval = error;
					break;
				}
			}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7d46cbd6a07a..add13f507ed2 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3249,7 +3249,7 @@ xlog_recover_process_iunlinks(
			 * next inode in the bucket.
			 */
			error = xfs_itobp(mp, NULL, ip, &dip,
-					&ibp, 0);
+					&ibp, 0, 0);
			ASSERT(error || (dip != NULL));
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 62188ea392c7..20e8abc16d18 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -51,11 +51,32 @@
 STATIC int	xfs_uuid_mount(xfs_mount_t *);
 STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
+
+#ifdef HAVE_PERCPU_SB
+STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
+STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
+STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
+STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
					 int, int);
+STATIC int	xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
					 int, int);
+STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
+
+#else
+
+#define xfs_icsb_destroy_counters(mp)			do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
+#define xfs_icsb_sync_counters(mp)			do { } while (0)
+#define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)
+#define xfs_icsb_modify_counters_locked(mp, a, b, c)	do { } while (0)
+
+#endif
+
 static const struct {
-    short offset;
-    short type;     /* 0 = integer
-                * 1 = binary / string (no translation)
-                */
+	short offset;
+	short type;	/* 0 = integer
+			 * 1 = binary / string (no translation)
+			 */
 } xfs_sb_info[] = {
     { offsetof(xfs_sb_t, sb_magicnum), 0 },
     { offsetof(xfs_sb_t, sb_blocksize), 0 },
@@ -113,7 +134,11 @@
 xfs_mount_init(void)
 {
	xfs_mount_t *mp;
 
-	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
+	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
+
+	if (xfs_icsb_init_counters(mp)) {
+		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
+	}
 
	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
	spinlock_init(&mp->m_sb_lock, "xfs_sb");
@@ -136,8 +161,8 @@ xfs_mount_init(void)
  */
 void
 xfs_mount_free(
-	xfs_mount_t *mp,
-	int remove_bhv)
+	xfs_mount_t	*mp,
+	int		remove_bhv)
 {
	if (mp->m_ihash)
		xfs_ihash_free(mp);
@@ -177,6 +202,7 @@ xfs_mount_free(
		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
	}
 
+	xfs_icsb_destroy_counters(mp);
	kmem_free(mp, sizeof(xfs_mount_t));
 }
 
@@ -242,9 +268,12 @@ xfs_mount_validate_sb(
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
+	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
+	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
+	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
-	    sbp->sb_imax_pct > 100)) {
+	    (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) {
		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
				     XFS_ERRLEVEL_LOW, mp, sbp);
@@ -527,6 +556,10 @@ xfs_readsb(xfs_mount_t *mp)
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}
 
+	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+
	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
@@ -1154,6 +1187,9 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
	sbp = xfs_getsb(mp, 0);
	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
		XFS_FORCED_SHUTDOWN(mp))) {
+
+		xfs_icsb_sync_counters(mp);
+
		/*
		 * mark shared-readonly if desired
		 */
@@ -1227,7 +1263,6 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
	xfs_trans_log_buf(tp, bp, first, last);
 }
 
-
 /*
  * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
  * a delta to a specified field in the in-core superblock.  Simply
@@ -1237,7 +1272,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  *
  * The SB_LOCK must be held when this routine is called.
  */
-STATIC int
+int
 xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
			int delta, int rsvd)
 {
@@ -1406,9 +1441,26 @@ xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
	unsigned long	s;
	int	status;
 
-	s = XFS_SB_LOCK(mp);
-	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	XFS_SB_UNLOCK(mp, s);
+	/* check for per-cpu counters */
+	switch (field) {
+#ifdef HAVE_PERCPU_SB
+	case XFS_SBS_ICOUNT:
+	case XFS_SBS_IFREE:
+	case XFS_SBS_FDBLOCKS:
+		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+			status = xfs_icsb_modify_counters(mp, field,
+							delta, rsvd);
+			break;
+		}
+		/* FALLTHROUGH */
+#endif
+	default:
+		s = XFS_SB_LOCK(mp);
+		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+		XFS_SB_UNLOCK(mp, s);
+		break;
+	}
+
	return status;
 }
 
@@ -1445,8 +1497,26 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
-		status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
-						msbp->msb_delta, rsvd);
+		switch (msbp->msb_field) {
+#ifdef HAVE_PERCPU_SB
+		case XFS_SBS_ICOUNT:
+		case XFS_SBS_IFREE:
+		case XFS_SBS_FDBLOCKS:
+			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+				status = xfs_icsb_modify_counters_locked(mp,
+							msbp->msb_field,
+							msbp->msb_delta, rsvd);
+				break;
+			}
+			/* FALLTHROUGH */
+#endif
+		default:
+			status = xfs_mod_incore_sb_unlocked(mp,
+						msbp->msb_field,
+						msbp->msb_delta, rsvd);
+			break;
+		}
+
		if (status != 0) {
			break;
		}
@@ -1463,8 +1533,28 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
-			status = xfs_mod_incore_sb_unlocked(mp,
-				msbp->msb_field, -(msbp->msb_delta), rsvd);
+			switch (msbp->msb_field) {
+#ifdef HAVE_PERCPU_SB
+			case XFS_SBS_ICOUNT:
+			case XFS_SBS_IFREE:
+			case XFS_SBS_FDBLOCKS:
+				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+					status =
+					    xfs_icsb_modify_counters_locked(mp,
+							msbp->msb_field,
+							-(msbp->msb_delta),
+							rsvd);
+					break;
+				}
+				/* FALLTHROUGH */
+#endif
+			default:
+				status = xfs_mod_incore_sb_unlocked(mp,
+							msbp->msb_field,
+							-(msbp->msb_delta),
+							rsvd);
+				break;
+			}
			ASSERT(status == 0);
			msbp--;
		}
@@ -1577,3 +1667,525 @@ xfs_mount_log_sbunit(
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0, NULL);
 }
+
+
+#ifdef HAVE_PERCPU_SB
+/*
+ * Per-cpu incore superblock counters
+ *
+ * Simple concept, difficult implementation
+ *
+ * Basically, replace the incore superblock counters with a distributed per cpu
+ * counter for contended fields (e.g. free block count).
+ *
+ * Difficulties arise in that the incore sb is used for ENOSPC checking, and
+ * hence needs to be accurately read when we are running low on space. Hence
+ * there is a method to enable and disable the per-cpu counters based on how
+ * much "stuff" is available in them.
+ *
+ * Basically, a counter is enabled if there is enough free resource to justify
+ * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
+ * ENOSPC), then we disable the counters to synchronise all callers and
+ * re-distribute the available resources.
+ *
+ * If, once we redistributed the available resources, we still get a failure,
+ * we disable the per-cpu counter and go through the slow path.
+ *
+ * The slow path is the current xfs_mod_incore_sb() function.  This means that
+ * when we disable a per-cpu counter, we need to drain its resources back to
+ * the global superblock. We do this after disabling the counter to prevent
+ * more threads from queueing up on the counter.
+ *
+ * Essentially, this means that we still need a lock in the fast path to enable
+ * synchronisation between the global counters and the per-cpu counters. This
+ * is not a problem because the lock will be local to a CPU almost all the time
+ * and have little contention except when we get to ENOSPC conditions.
+ *
+ * Basically, this lock becomes a barrier that enables us to lock out the fast
+ * path while we do things like enabling and disabling counters and
+ * synchronising the counters.
+ *
+ * Locking rules:
+ *
+ *	1. XFS_SB_LOCK() before picking up per-cpu locks
+ *	2. per-cpu locks always picked up via for_each_online_cpu() order
+ *	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
+ *	4. modifying per-cpu counters requires holding per-cpu lock
+ *	5. modifying global counters requires holding XFS_SB_LOCK
+ *	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
+ *	   and _none_ of the per-cpu locks.
+ *
+ * Disabled counters are only ever re-enabled by a balance operation
+ * that results in more free resources per CPU than a given threshold.
+ * To ensure counters don't remain disabled, they are rebalanced when
+ * the global resource goes above a higher threshold (i.e. some hysteresis
+ * is present to prevent thrashing).
+ */
+
+/*
+ * hot-plug CPU notifier support.
+ *
+ * We cannot use the hotcpu_register() function because it does
+ * not allow notifier instances. We need a notifier per filesystem
+ * as we need to be able to identify the filesystem to balance
+ * the counters out. This is achieved by having a notifier block
+ * embedded in the xfs_mount_t and doing pointer magic to get the
+ * mount pointer from the notifier block address.
+ */
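The scheme described above — per-CPU shares with a global fallback that disables and drains a counter when a local share runs dry — is independent of the kernel APIs involved. A userspace sketch of the core protocol using pthreads (rebalancing/re-enabling, hotplug, mutex initialization, and error handling are all omitted; in the kernel code the mount is recovered from the notifier block with container_of and the per-CPU data comes from alloc_percpu()):

	#include <pthread.h>
	#include <stdint.h>

	#define NCPUS 4

	struct pcpu_counter {
		pthread_mutex_t glock;		/* analogue of XFS_SB_LOCK */
		int64_t global;			/* authoritative while disabled */
		int disabled;
		struct { pthread_mutex_t lock; int64_t v; } cpu[NCPUS];
	};

	/* Fast path: apply delta to one CPU's share; the disabled check
	 * piggybacks on the local lock, as in the kernel code. */
	static int pcpu_mod(struct pcpu_counter *c, int cpu, int64_t delta)
	{
		pthread_mutex_lock(&c->cpu[cpu].lock);
		if (!c->disabled && c->cpu[cpu].v + delta >= 0) {
			c->cpu[cpu].v += delta;
			pthread_mutex_unlock(&c->cpu[cpu].lock);
			return 0;
		}
		pthread_mutex_unlock(&c->cpu[cpu].lock);

		/* Slow path: serialise, drain every share into the global
		 * counter, and apply the delta there (ENOSPC analogue if
		 * it would underflow). */
		pthread_mutex_lock(&c->glock);
		c->disabled = 1;
		for (int i = 0; i < NCPUS; i++) {
			pthread_mutex_lock(&c->cpu[i].lock);
			c->global += c->cpu[i].v;
			c->cpu[i].v = 0;
			pthread_mutex_unlock(&c->cpu[i].lock);
		}
		int ret = -1;
		if (c->global + delta >= 0) {
			c->global += delta;
			ret = 0;
		}
		pthread_mutex_unlock(&c->glock);
		return ret;
	}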
+STATIC int
+xfs_icsb_cpu_notify(
+	struct notifier_block *nfb,
+	unsigned long action,
+	void *hcpu)
+{
+	xfs_icsb_cnts_t *cntp;
+	xfs_mount_t	*mp;
+	int		s;
+
+	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
+	cntp = (xfs_icsb_cnts_t *)
+			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
+	switch (action) {
+	case CPU_UP_PREPARE:
+		/* Easy Case - initialize the area and locks, and
+		 * then rebalance when online does everything else for us. */
+		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
+		break;
+	case CPU_ONLINE:
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+		break;
+	case CPU_DEAD:
+		/* Disable all the counters, then fold the dead cpu's
+		 * count into the total on the global superblock and
+		 * re-enable the counters. */
+		s = XFS_SB_LOCK(mp);
+		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
+		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
+		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
+
+		mp->m_sb.sb_icount += cntp->icsb_icount;
+		mp->m_sb.sb_ifree += cntp->icsb_ifree;
+		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
+
+		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
+
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
+		XFS_SB_UNLOCK(mp, s);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+int
+xfs_icsb_init_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_cnts_t *cntp;
+	int		i;
+
+	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
+	if (mp->m_sb_cnts == NULL)
+		return -ENOMEM;
+
+	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
+	mp->m_icsb_notifier.priority = 0;
+	register_cpu_notifier(&mp->m_icsb_notifier);
+
+	for_each_online_cpu(i) {
+		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
+		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
+	}
+	/*
+	 * start with all counters disabled so that the
+	 * initial balance kicks us off correctly
+	 */
+	mp->m_icsb_counters = -1;
+	return 0;
+}
+
+STATIC void
+xfs_icsb_destroy_counters(
+	xfs_mount_t	*mp)
+{
+	if (mp->m_sb_cnts) {
+		unregister_cpu_notifier(&mp->m_icsb_notifier);
+		free_percpu(mp->m_sb_cnts);
+	}
+}
+
+STATIC inline void
+xfs_icsb_lock_cntr(
+	xfs_icsb_cnts_t	*icsbp)
+{
+	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
+		ndelay(1000);
+	}
+}
+
+STATIC inline void
+xfs_icsb_unlock_cntr(
+	xfs_icsb_cnts_t	*icsbp)
+{
+	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
+}
+
+
+STATIC inline void
+xfs_icsb_lock_all_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_cnts_t *cntp;
+	int		i;
+
+	for_each_online_cpu(i) {
+		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
+		xfs_icsb_lock_cntr(cntp);
+	}
+}
+
+STATIC inline void
+xfs_icsb_unlock_all_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_cnts_t *cntp;
+	int		i;
+
+	for_each_online_cpu(i) {
+		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
+		xfs_icsb_unlock_cntr(cntp);
+	}
+}
+
+STATIC void
+xfs_icsb_count(
+	xfs_mount_t	*mp,
+	xfs_icsb_cnts_t	*cnt,
+	int		flags)
+{
+	xfs_icsb_cnts_t *cntp;
+	int		i;
+
+	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
+
+	if (!(flags & XFS_ICSB_LAZY_COUNT))
+		xfs_icsb_lock_all_counters(mp);
+
+	for_each_online_cpu(i) {
+		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
+		cnt->icsb_icount += cntp->icsb_icount;
+		cnt->icsb_ifree += cntp->icsb_ifree;
+		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
+	}
+
+	if (!(flags & XFS_ICSB_LAZY_COUNT))
+		xfs_icsb_unlock_all_counters(mp);
+}
+
+STATIC int
+xfs_icsb_counter_disabled(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field)
+{
+	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
+	return test_bit(field, &mp->m_icsb_counters);
+}
+
+STATIC int
+xfs_icsb_disable_counter(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field)
+{
+	xfs_icsb_cnts_t	cnt;
+
+	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
+
+	xfs_icsb_lock_all_counters(mp);
+	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
+		/* drain back to superblock */
+
+		xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
+		switch(field) {
+		case XFS_SBS_ICOUNT:
+			mp->m_sb.sb_icount = cnt.icsb_icount;
+			break;
+		case XFS_SBS_IFREE:
+			mp->m_sb.sb_ifree = cnt.icsb_ifree;
+			break;
+		case XFS_SBS_FDBLOCKS:
+			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	xfs_icsb_unlock_all_counters(mp);
+
+	return 0;
+}
+
+STATIC void
+xfs_icsb_enable_counter(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	uint64_t	count,
+	uint64_t	resid)
+{
+	xfs_icsb_cnts_t	*cntp;
+	int		i;
+
+	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
+
+	xfs_icsb_lock_all_counters(mp);
+	for_each_online_cpu(i) {
+		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
+		switch (field) {
+		case XFS_SBS_ICOUNT:
+			cntp->icsb_icount = count + resid;
+			break;
+		case XFS_SBS_IFREE:
+			cntp->icsb_ifree = count + resid;
+			break;
+		case XFS_SBS_FDBLOCKS:
+			cntp->icsb_fdblocks = count + resid;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		resid = 0;
+	}
+	clear_bit(field, &mp->m_icsb_counters);
+	xfs_icsb_unlock_all_counters(mp);
+}
+
+STATIC void
+xfs_icsb_sync_counters_int(
+	xfs_mount_t	*mp,
+	int		flags)
+{
+	xfs_icsb_cnts_t	cnt;
+	int		s;
+
+	/* Take the SB lock unless the caller already holds it */
+	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
+		s = XFS_SB_LOCK(mp);
+
+	xfs_icsb_count(mp, &cnt, flags);
+
+	/* Fold the per-cpu totals of each enabled counter into mp->m_sb */
+	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
+		mp->m_sb.sb_icount = cnt.icsb_icount;
+	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
+		mp->m_sb.sb_ifree = cnt.icsb_ifree;
+	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
+		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
+
+	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
+		XFS_SB_UNLOCK(mp, s);
+}
+
+/*
+ * Accurate update of per-cpu counters to incore superblock
+ */
+STATIC void
+xfs_icsb_sync_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_sync_counters_int(mp, 0);
+}
+
+/*
+ * lazy addition used for things like df, background sb syncs, etc
+ */
+void
+xfs_icsb_sync_counters_lazy(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
+}
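The balance step that follows is just integer division with the remainder pinned to the first CPU — do_div(count, weight) leaves the quotient in count and returns the remainder. A sketch of the arithmetic and the re-enable gate (the threshold constant is the one defined below):

	#include <stdint.h>

	#define INO_REENABLE 64		/* per-CPU share needed to re-enable */

	/* Compute the per-CPU share and the remainder handed to CPU 0.
	 * Returns 0 if the counter must stay disabled (share too small). */
	static int
	balance_share(uint64_t total, unsigned int ncpus,
		      uint64_t *share, uint64_t *resid)
	{
		*share = total / ncpus;		/* do_div() quotient */
		*resid = total % ncpus;		/* do_div() return value */
		return *share >= INO_REENABLE;
	}

With total = 1000 and eight CPUs, each CPU receives 125 and CPU 0 an extra 0; with total = 100 the share of 12 falls below the threshold, so the counter stays disabled until the global pool grows.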
+
+/*
+ * Balance and enable/disable counters as necessary.
+ *
+ * Thresholds for re-enabling counters are somewhat magic.
+ * inode counts are chosen to be the same number as a single
+ * on disk allocation chunk per CPU, and free blocks is
+ * something far enough from zero that we aren't going to thrash
+ * when we get near ENOSPC.
+ */
+#define XFS_ICSB_INO_CNTR_REENABLE	64
+#define XFS_ICSB_FDBLK_CNTR_REENABLE	512
+STATIC void
+xfs_icsb_balance_counter(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int		flags)
+{
+	uint64_t	count, resid = 0;
+	int		weight = num_online_cpus();
+	int		s;
+
+	if (!(flags & XFS_ICSB_SB_LOCKED))
+		s = XFS_SB_LOCK(mp);
+
+	/* disable counter and sync counter */
+	xfs_icsb_disable_counter(mp, field);
+
+	/* update counters - first CPU gets residual */
+	switch (field) {
+	case XFS_SBS_ICOUNT:
+		count = mp->m_sb.sb_icount;
+		resid = do_div(count, weight);
+		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+			goto out;
+		break;
+	case XFS_SBS_IFREE:
+		count = mp->m_sb.sb_ifree;
+		resid = do_div(count, weight);
+		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+			goto out;
+		break;
+	case XFS_SBS_FDBLOCKS:
+		count = mp->m_sb.sb_fdblocks;
+		resid = do_div(count, weight);
+		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE)
+			goto out;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	xfs_icsb_enable_counter(mp, field, count, resid);
+out:
+	if (!(flags & XFS_ICSB_SB_LOCKED))
+		XFS_SB_UNLOCK(mp, s);
+}
+
+STATIC int
+xfs_icsb_modify_counters_int(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int		delta,
+	int		rsvd,
+	int		flags)
+{
+	xfs_icsb_cnts_t	*icsbp;
+	long long	lcounter;	/* long counter for 64 bit fields */
+	int		cpu, s, locked = 0;
+	int		ret = 0, balance_done = 0;
+
+again:
+	cpu = get_cpu();
+	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+	xfs_icsb_lock_cntr(icsbp);
+	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
+		goto slow_path;
+
+	switch (field) {
+	case XFS_SBS_ICOUNT:
+		lcounter = icsbp->icsb_icount;
+		lcounter += delta;
+		if (unlikely(lcounter < 0))
+			goto slow_path;
+		icsbp->icsb_icount = lcounter;
+		break;
+
+	case XFS_SBS_IFREE:
+		lcounter = icsbp->icsb_ifree;
+		lcounter += delta;
+		if (unlikely(lcounter < 0))
+			goto slow_path;
+		icsbp->icsb_ifree = lcounter;
+		break;
+
+	case XFS_SBS_FDBLOCKS:
+		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
+
+		lcounter = icsbp->icsb_fdblocks;
+		lcounter += delta;
+		if (unlikely(lcounter < 0))
+			goto slow_path;
+		icsbp->icsb_fdblocks = lcounter;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	xfs_icsb_unlock_cntr(icsbp);
+	put_cpu();
+	if (locked)
+		XFS_SB_UNLOCK(mp, s);
+	return 0;
+
+	/*
+	 * The slow path needs to be run with the SB lock
+	 * held so that we prevent other threads from
+	 * attempting to run this path at the same time.
+	 * This provides exclusion for the balancing code,
+	 * and exclusive fallback if the balance does not
+	 * provide enough resources to continue in an unlocked
+	 * manner.
+	 */
+slow_path:
+	xfs_icsb_unlock_cntr(icsbp);
+	put_cpu();
+
+	/* need to hold superblock in case we need
+	 * to disable a counter */
+	if (!(flags & XFS_ICSB_SB_LOCKED)) {
+		s = XFS_SB_LOCK(mp);
+		locked = 1;
+		flags |= XFS_ICSB_SB_LOCKED;
+	}
+	if (!balance_done) {
+		xfs_icsb_balance_counter(mp, field, flags);
+		balance_done = 1;
+		goto again;
+	} else {
+		/*
+		 * we might not have enough on this local
+		 * cpu to allocate for a bulk request.
+		 * We need to drain this field from all CPUs
+		 * and disable the counter fastpath
+		 */
+		xfs_icsb_disable_counter(mp, field);
+	}
+
+	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+
+	if (locked)
+		XFS_SB_UNLOCK(mp, s);
+	return ret;
+}
+
+STATIC int
+xfs_icsb_modify_counters(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int		delta,
+	int		rsvd)
+{
+	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
+}
+
+/*
+ * Called when superblock is already locked
+ */
+STATIC int
+xfs_icsb_modify_counters_locked(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int		delta,
+	int		rsvd)
+{
+	return xfs_icsb_modify_counters_int(mp, field, delta,
+						rsvd, XFS_ICSB_SB_LOCKED);
+}
+#endif
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index cd3cf9613a00..ebd73960e9db 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -267,6 +267,34 @@ typedef struct xfs_ioops {
 #define XFS_IODONE(vfsp) \
	(*(mp)->m_io_ops.xfs_iodone)(vfsp)
 
+#ifdef HAVE_PERCPU_SB
+
+/*
+ * Valid per-cpu incore superblock counters. Note that if you add new counters,
+ * you may need to define new counter disabled bit field descriptors as there
+ * are more possible fields in the superblock than can fit in a bitfield on a
+ * 32 bit platform. The XFS_SBS_* values for the current counters just fit.
+ */
+typedef struct xfs_icsb_cnts {
+	uint64_t	icsb_fdblocks;
+	uint64_t	icsb_ifree;
+	uint64_t	icsb_icount;
+	unsigned long	icsb_flags;
+} xfs_icsb_cnts_t;
+
+#define XFS_ICSB_FLAG_LOCK	(1 << 0)	/* counter lock bit */
+
+#define XFS_ICSB_SB_LOCKED	(1 << 0)	/* sb already locked */
+#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
+
+extern int	xfs_icsb_init_counters(struct xfs_mount *);
+extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
+
+#else
+#define xfs_icsb_init_counters(mp)	(0)
+#define xfs_icsb_sync_counters_lazy(mp)	do { } while (0)
+#endif
 
 typedef struct xfs_mount {
	bhv_desc_t		m_bhv;		/* vfs xfs behavior */
@@ -372,6 +400,11 @@ typedef struct xfs_mount {
	struct xfs_qmops	m_qm_ops;	/* vector of XQM ops */
	struct xfs_ioops	m_io_ops;	/* vector of I/O ops */
	atomic_t		m_active_trans;	/* number trans frozen */
+#ifdef HAVE_PERCPU_SB
+	xfs_icsb_cnts_t	*m_sb_cnts;	/* per-cpu superblock counters */
+	unsigned long	m_icsb_counters; /* disabled per-cpu counters */
+	struct notifier_block	m_icsb_notifier; /* hotplug cpu notifier */
+#endif
 } xfs_mount_t;
 
 /*
@@ -386,8 +419,6 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
-#define XFS_MOUNT_NOATIME	(1ULL << 5)	/* don't modify inode access
						   times on reads */
 #define XFS_MOUNT_RETERR	(1ULL << 6)	/* return alignment errors to
						   user */
 #define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
@@ -411,6 +442,8 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
 #define XFS_MOUNT_COMPAT_IOSIZE	(1ULL << 22)	/* don't report large preferred
						 * I/O size in stat() */
+#define XFS_MOUNT_NO_PERCPU_SB	(1ULL << 23)	/* don't use per-cpu superblock
						   counters */
 
 /*
@@ -473,11 +506,6 @@ xfs_preferred_iosize(xfs_mount_t *mp)
 #define XFS_SHUTDOWN_REMOTE_REQ	0x10	/* Shutdown came from remote cell */
 
 /*
- * xflags for xfs_syncsub
- */
-#define XFS_XSYNC_RELOC		0x01
-
-/*
  * Flags for xfs_mountfs
  */
 #define XFS_MFSI_SECOND		0x01	/* Secondary mount -- skip stuff */
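The m_icsb_counters word doubles as the set of "disabled" bits described in the comment above, and test_and_set_bit() gives exactly-once drain semantics: only the caller that flips a bit from 0 to 1 folds the per-CPU values back into the superblock. A C11 userspace analogue of that protocol (field numbers and names are illustrative):

	#include <stdatomic.h>

	/* Like m_icsb_counters = -1: every counter starts disabled so the
	 * first balance operation enables them. */
	static atomic_ulong disabled = ~0UL;

	static int counter_disabled(int field)
	{
		return (atomic_load(&disabled) >> field) & 1;
	}

	/* Returns nonzero only for the caller that actually disabled the
	 * field, i.e. the one that must drain per-CPU values globally. */
	static int disable_counter(int field)
	{
		unsigned long bit = 1UL << field;

		return !(atomic_fetch_or(&disabled, bit) & bit);
	}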
@@ -548,6 +576,8 @@ extern void	xfs_unmountfs_close(xfs_mount_t *, struct cred *);
 extern int	xfs_unmountfs_writesb(xfs_mount_t *);
 extern int	xfs_unmount_flush(xfs_mount_t *, int);
 extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int);
+extern int	xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
			int, int);
 extern int	xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, uint, int);
 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index de85eefb7966..e63795644478 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -89,6 +89,7 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
  */
 extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock);
 extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock);
+extern int xfs_setattr(bhv_desc_t *bdp, vattr_t *vap, int flags, cred_t *credp);
 extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf,
				 xfs_off_t offset, cred_t *credp, int flags);
 extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state,
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d3d714e6b32a..2918956553a5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -55,10 +55,141 @@ STATIC void	xfs_trans_committed(xfs_trans_t *, int);
 STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
 STATIC void	xfs_trans_free(xfs_trans_t *);
 
-kmem_zone_t *xfs_trans_zone;
+kmem_zone_t	*xfs_trans_zone;
 
 /*
+ * Reservation functions here avoid a huge stack in xfs_trans_init
+ * due to register overflow from temporaries in the calculations.
+ */
+
+STATIC uint
+xfs_calc_write_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_itruncate_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_rename_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_link_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_remove_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_symlink_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_create_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_mkdir_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_ifree_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_ichange_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_growdata_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_GROWDATA_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_GROWRTZERO_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_GROWRTFREE_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_swrite_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_SWRITE_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_writeid_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_WRITEID_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_addafork_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_attrinval_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ATTRINVAL_LOG_RES(mp);
+}
+
+STATIC uint
+xfs_calc_attrset_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_attrrm_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+}
+
+STATIC uint
+xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
+{
+	return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
+}
+
+/*
  * Initialize the precomputed transaction reservation values
  * in the mount structure.
  */
@@ -69,39 +200,27 @@ xfs_trans_init(
	xfs_trans_reservations_t	*resp;
 
	resp = &(mp->m_reservations);
-	resp->tr_write =
-	    (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_itruncate =
-	    (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_rename =
-	    (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp);
-	resp->tr_remove =
-	    (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_symlink =
-	    (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_create =
-	    (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_mkdir =
-	    (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_ifree =
-	    (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_ichange =
-	    (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp);
-	resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp);
-	resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp);
-	resp->tr_addafork =
-	    (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp);
-	resp->tr_attrset =
-	    (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_attrrm =
-	    (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
-	resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
-	resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp);
-	resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp);
-	resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp);
+	resp->tr_write = xfs_calc_write_reservation(mp);
+	resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
+	resp->tr_rename = xfs_calc_rename_reservation(mp);
+	resp->tr_link = xfs_calc_link_reservation(mp);
+	resp->tr_remove = xfs_calc_remove_reservation(mp);
+	resp->tr_symlink = xfs_calc_symlink_reservation(mp);
+	resp->tr_create = xfs_calc_create_reservation(mp);
+	resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
+	resp->tr_ifree = xfs_calc_ifree_reservation(mp);
+	resp->tr_ichange = xfs_calc_ichange_reservation(mp);
+	resp->tr_growdata = xfs_calc_growdata_reservation(mp);
+	resp->tr_swrite = xfs_calc_swrite_reservation(mp);
+	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
+	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
+	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
+	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
+	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
+	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
+	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
+	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
+	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
 }
 
 /*
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index d77901c07f63..e48befa4e337 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -380,7 +380,7 @@ typedef struct xfs_trans {
	xfs_trans_header_t	t_header;	/* header for in-log trans */
	unsigned int		t_busy_free;	/* busy descs free */
	xfs_log_busy_chunk_t	t_busy;		/* busy/async free blocks */
-	xfs_pflags_t		t_pflags;	/* saved pflags state */
+	unsigned long		t_pflags;	/* saved process flags state */
 } xfs_trans_t;
 
 #endif	/* __KERNEL__ */
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index b6ad370fab3d..d4ec4dfaf19c 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -55,7 +55,7 @@
 #include "xfs_clnt.h"
 #include "xfs_fsops.h"
 
-STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
+STATIC int	xfs_sync(bhv_desc_t *, int, cred_t *);
 
 int
 xfs_init(void)
@@ -77,11 +77,12 @@ xfs_init(void)
						 "xfs_bmap_free_item");
	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
					    "xfs_btree_cur");
-	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	xfs_da_state_zone =
		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
 
	/*
	 * The size of the zone allocated buf log item is the maximum
@@ -93,17 +94,30 @@ xfs_init(void)
				(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
				  NBWORD) * sizeof(int))),
			       "xfs_buf_item");
-	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
-			((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efd_zone =
+		kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+				((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
			       "xfs_efd_item");
-	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
-			((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efi_zone =
+		kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+				((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
			       "xfs_efi_item");
-	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
-	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
-	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
-					    "xfs_chashlist");
-	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
+
+	/*
+	 * These zones warrant special memory allocator hints
+	 */
+	xfs_inode_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+					KM_ZONE_SPREAD, NULL);
+	xfs_ili_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+					KM_ZONE_SPREAD, NULL);
+	xfs_chashlist_zone =
+		kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist",
+					KM_ZONE_SPREAD, NULL);
 
	/*
	 * Allocate global trace buffers.
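kmem_zone_init_flags() is new infrastructure this hunk calls but does not show; presumably it forwards the KM_ZONE_* hints to the slab allocator. A sketch of what such a wrapper could look like in this kernel generation — the flag mapping (HWALIGN to SLAB_HWCACHE_ALIGN, RECLAIM to SLAB_RECLAIM_ACCOUNT, SPREAD to SLAB_MEM_SPREAD) is an assumption, not taken from this diff:

	/* Hypothetical wrapper; kmem_zone_t is the XFS alias for kmem_cache_t. */
	#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN	/* cacheline-align objects */
	#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT	/* account as reclaimable */
	#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD		/* spread over cpuset nodes */

	static inline kmem_zone_t *
	kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
			     void (*construct)(void *, kmem_zone_t *, unsigned long))
	{
		return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
	}

The hints matter for the inode zone in particular: hardware alignment keeps hot inode fields on cacheline boundaries, and reclaim accounting lets the VM treat the cache as shrinkable.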
@@ -176,18 +190,18 @@ xfs_cleanup(void)
	ktrace_free(xfs_alloc_trace_buf);
 #endif
 
-	kmem_cache_destroy(xfs_bmap_free_item_zone);
-	kmem_cache_destroy(xfs_btree_cur_zone);
-	kmem_cache_destroy(xfs_inode_zone);
-	kmem_cache_destroy(xfs_trans_zone);
-	kmem_cache_destroy(xfs_da_state_zone);
-	kmem_cache_destroy(xfs_dabuf_zone);
-	kmem_cache_destroy(xfs_buf_item_zone);
-	kmem_cache_destroy(xfs_efd_zone);
-	kmem_cache_destroy(xfs_efi_zone);
-	kmem_cache_destroy(xfs_ifork_zone);
-	kmem_cache_destroy(xfs_ili_zone);
-	kmem_cache_destroy(xfs_chashlist_zone);
+	kmem_zone_destroy(xfs_bmap_free_item_zone);
+	kmem_zone_destroy(xfs_btree_cur_zone);
+	kmem_zone_destroy(xfs_inode_zone);
+	kmem_zone_destroy(xfs_trans_zone);
+	kmem_zone_destroy(xfs_da_state_zone);
+	kmem_zone_destroy(xfs_dabuf_zone);
+	kmem_zone_destroy(xfs_buf_item_zone);
+	kmem_zone_destroy(xfs_efd_zone);
+	kmem_zone_destroy(xfs_efi_zone);
+	kmem_zone_destroy(xfs_ifork_zone);
+	kmem_zone_destroy(xfs_ili_zone);
+	kmem_zone_destroy(xfs_chashlist_zone);
 }
 
 /*
@@ -258,8 +272,6 @@ xfs_start_flags(
		mp->m_inoadd = XFS_INO64_OFFSET;
	}
 #endif
-	if (ap->flags & XFSMNT_NOATIME)
-		mp->m_flags |= XFS_MOUNT_NOATIME;
	if (ap->flags & XFSMNT_RETERR)
		mp->m_flags |= XFS_MOUNT_RETERR;
	if (ap->flags & XFSMNT_NOALIGN)
@@ -620,7 +632,7 @@ xfs_quiesce_fs(
	xfs_mount_t	*mp)
 {
	int	count = 0, pincount;
-	
+
	xfs_refcache_purge_mp(mp);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_finish_reclaim_all(mp, 0);
@@ -631,7 +643,7 @@ xfs_quiesce_fs(
	 * meta data (typically directory updates).
	 * Which then must be flushed and logged before
	 * we can write the unmount record.
-	 */	
+	 */
	do {
		xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
@@ -654,11 +666,6 @@ xfs_mntupdate(
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	int		error;
 
-	if (args->flags & XFSMNT_NOATIME)
-		mp->m_flags |= XFS_MOUNT_NOATIME;
-	else
-		mp->m_flags &= ~XFS_MOUNT_NOATIME;
-
	if (args->flags & XFSMNT_BARRIER)
		mp->m_flags |= XFS_MOUNT_BARRIER;
	else
@@ -814,6 +821,7 @@ xfs_statvfs(
 
	statp->f_type = XFS_SB_MAGIC;
 
+	xfs_icsb_sync_counters_lazy(mp);
	s = XFS_SB_LOCK(mp);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
@@ -1221,7 +1229,7 @@ xfs_sync_inodes(
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
			error = xfs_itobp(mp, NULL, ip,
-					&dip, &bp, 0);
+					&dip, &bp, 0, 0);
			if (!error) {
				xfs_buf_relse(bp);
			} else {
@@ -1690,10 +1698,7 @@ xfs_parseargs(
	int			iosize;
 
	args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
-
-#if 0	/* XXX: off by default, until some remaining issues ironed out */
-	args->flags |= XFSMNT_IDELETE; /* default to on */
-#endif
+	args->flags |= XFSMNT_IDELETE;
 
	if (!options)
		goto done;
@@ -1903,7 +1908,6 @@ xfs_showargs(
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
-		{ XFS_MOUNT_IDELETE,		"," MNTOPT_NOIKEEP },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;
@@ -1939,6 +1943,8 @@ xfs_showargs(
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 
+	if (!(mp->m_flags & XFS_MOUNT_IDELETE))
+		seq_printf(m, "," MNTOPT_IKEEP);
	if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
		seq_printf(m, "," MNTOPT_LARGEIO);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index eaab355f5a89..a478f42e63ff 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -615,6 +615,7 @@ xfs_setattr(
			code = xfs_igrow_start(ip, vap->va_size, credp);
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		vn_iowait(vp); /* wait for the completion of any pending DIOs */
		if (!code)
			code = xfs_itruncate_data(ip, vap->va_size);
		if (code) {
@@ -1556,7 +1557,7 @@ xfs_release(
		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
			return error;
		/* Update linux inode block count after free above */
-		LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
			ip->i_d.di_nblocks + ip->i_delayed_blks);
	}
 }
@@ -1637,7 +1638,7 @@ xfs_inactive(
		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
			return VN_INACTIVE_CACHE;
		/* Update linux inode block count after free above */
-		LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
			ip->i_d.di_nblocks + ip->i_delayed_blks);
	}
	goto out;
@@ -3186,7 +3187,7 @@ xfs_rmdir(
	/* Fall through to std_return with error = 0 or the errno
	 * from xfs_trans_commit. */
-std_return:
+ std_return:
	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
					dir_vp, DM_RIGHT_NULL,
@@ -3196,12 +3197,12 @@ std_return:
	}
	return error;
 
-error1:
+ error1:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
	/* FALLTHROUGH */
-error_return:
+ error_return:
	xfs_trans_cancel(tp, cancel_flags);
	goto std_return;
 }
@@ -4310,8 +4311,10 @@ xfs_free_file_space(
	ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
	if (attr_flags & ATTR_NOLOCK)
		need_iolock = 0;
-	if (need_iolock)
+	if (need_iolock) {
		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		vn_iowait(vp);	/* wait for the completion of any pending DIOs */
+	}
 
	rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
			(__uint8_t)NBPP);
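Both vn_iowait() call sites added above enforce the same ordering rule: no direct I/O may still be in flight when the file size or extent map changes underneath it. The generic shape of such a gate is an in-flight count plus a wait, sketched here with pthreads rather than the vnode layer's actual implementation:

	#include <pthread.h>

	struct iogate {
		pthread_mutex_t	lock;
		pthread_cond_t	drained;
		int		inflight;
	};

	static void io_start(struct iogate *g)	/* as each DIO is issued */
	{
		pthread_mutex_lock(&g->lock);
		g->inflight++;
		pthread_mutex_unlock(&g->lock);
	}

	static void io_done(struct iogate *g)	/* from I/O completion */
	{
		pthread_mutex_lock(&g->lock);
		if (--g->inflight == 0)
			pthread_cond_broadcast(&g->drained);
		pthread_mutex_unlock(&g->lock);
	}

	static void io_wait(struct iogate *g)	/* truncate path: block until quiet */
	{
		pthread_mutex_lock(&g->lock);
		while (g->inflight)
			pthread_cond_wait(&g->drained, &g->lock);
		pthread_mutex_unlock(&g->lock);
	}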