author	Eric Paris <eparis@redhat.com>	2009-12-17 21:24:23 -0500
committer	Eric Paris <eparis@redhat.com>	2010-07-28 09:58:52 -0400
commit	2823e04de4f1a49087b58ff2bb8f61361ffd9321 (patch)
tree	0467ddf513cfb9ec76f3fe498bdc9b5084008c84 /kernel
parent	3a9fb89f4cd04c23e16397befba92efb5d989b74 (diff)
fsnotify: put inode specific fields in an fsnotify_mark in a union
The addition of marks on vfs mounts will be simplified if the inode specific parts of a mark and the vfsmnt specific parts of a mark are actually in a union so naming can be easy. This patch just implements the inode struct and the union.

Signed-off-by: Eric Paris <eparis@redhat.com>
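For orientation, here is a minimal sketch of the layout this patch moves toward, inferred from the entry->i.inode accesses in the diff below; everything beyond the union member 'i' and its 'inode' field is an assumption, not taken verbatim from the patch:

	struct inode;				/* opaque here; defined in <linux/fs.h> */

	/* inode-specific part of a mark (sketch; the real struct carries more fields) */
	struct fsnotify_inode_mark {
		struct inode *inode;		/* inode this mark is attached to */
	};

	struct fsnotify_mark_entry {
		/* common fields (group, lock, refcount, ...) omitted */
		union {
			struct fsnotify_inode_mark i;	/* inode marks: entry->i.inode */
			/* a vfsmount-specific counterpart is expected to join this union later */
		};
	};

With this layout, every call site that previously read entry->inode now reads entry->i.inode, which is the entirety of the audit_tree.c change below.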
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index ecf0bf260d09..c21b05d25224 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -179,9 +179,9 @@ static void insert_hash(struct audit_chunk *chunk)
struct fsnotify_mark_entry *entry = &chunk->mark;
struct list_head *list;
- if (!entry->inode)
+ if (!entry->i.inode)
return;
- list = chunk_hash(entry->inode);
+ list = chunk_hash(entry->i.inode);
list_add_rcu(&chunk->hash, list);
}
@@ -193,7 +193,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
list_for_each_entry_rcu(p, list, hash) {
/* mark.inode may have gone NULL, but who cares? */
- if (p->mark.inode == inode) {
+ if (p->mark.i.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
@@ -233,7 +233,7 @@ static void untag_chunk(struct node *p)
spin_unlock(&hash_lock);
spin_lock(&entry->lock);
- if (chunk->dead || !entry->inode) {
+ if (chunk->dead || !entry->i.inode) {
spin_unlock(&entry->lock);
goto out;
}
@@ -259,7 +259,7 @@ static void untag_chunk(struct node *p)
if (!new)
goto Fallback;
fsnotify_duplicate_mark(&new->mark, entry);
- if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, 1)) {
free_chunk(new);
goto Fallback;
}
@@ -388,7 +388,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk_entry = &chunk->mark;
spin_lock(&old_entry->lock);
- if (!old_entry->inode) {
+ if (!old_entry->i.inode) {
/* old_entry is being shot, lets just lie */
spin_unlock(&old_entry->lock);
fsnotify_put_mark(old_entry);
@@ -397,7 +397,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
}
fsnotify_duplicate_mark(chunk_entry, old_entry);
- if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, 1)) {
spin_unlock(&old_entry->lock);
free_chunk(chunk);
fsnotify_put_mark(old_entry);
@@ -605,7 +605,7 @@ void audit_trim_trees(void)
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
/* this could be NULL if the watch is dying elsewhere... */
- struct inode *inode = chunk->mark.inode;
+ struct inode *inode = chunk->mark.i.inode;
node->index |= 1U<<31;
if (iterate_mounts(compare_root, inode, root_mnt))
node->index &= ~(1U<<31);