/* SPDX-License-Identifier: GPL-2.0 */
/*
* descriptor table internals; you almost certainly want file.h instead.
*/
#ifndef __LINUX_FDTABLE_H
#define __LINUX_FDTABLE_H

#include <linux/posix_types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/atomic.h>

/*
 * The default fd array needs to be at least BITS_PER_LONG,
 * as this is the granularity returned by copy_fdset().
 */
#define NR_OPEN_DEFAULT BITS_PER_LONG
#define NR_OPEN_MAX ~0U

struct fdtable {
	unsigned int max_fds;
	struct file __rcu **fd;		/* current fd array */
	unsigned long *close_on_exec;
	unsigned long *open_fds;
	unsigned long *full_fds_bits;
	struct rcu_head rcu;
};
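/*
 * Descriptive note (a sketch of the layout, not new API): each of the three
 * bitmaps has one bit per file descriptor, so for a given fd the helpers
 * below end up testing
 *
 *	fdt->open_fds[fd / BITS_PER_LONG] & (1UL << (fd % BITS_PER_LONG))
 *
 * via test_bit().  full_fds_bits is coarser: one bit per BITS_PER_LONG
 * descriptors, set when the corresponding open_fds word is completely
 * allocated, which lets the search for the next free fd skip full words.
 */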
static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->close_on_exec);
}

static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->open_fds);
}
/*
* Open file table structure
*/
struct files_struct {
	/*
	 * read mostly part
	 */
	atomic_t count;
	bool resize_in_progress;
	wait_queue_head_t resize_wait;

	struct fdtable __rcu *fdt;
	struct fdtable fdtab;

	/*
	 * written part on a separate cache line in SMP
	 */
	spinlock_t file_lock ____cacheline_aligned_in_smp;
	unsigned int next_fd;
	unsigned long close_on_exec_init[1];
	unsigned long open_fds_init[1];
	unsigned long full_fds_bits_init[1];
	struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
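/*
 * Descriptive note: a freshly allocated files_struct starts with ->fdt
 * pointing at the embedded ->fdtab, whose arrays are the *_init words and
 * fd_array above, so tasks that never exceed NR_OPEN_DEFAULT descriptors
 * need no separate allocation.  When the table must grow, a larger fdtable
 * is built, published by an RCU assignment of ->fdt, and the old one is
 * freed after a grace period, so lockless readers always see a consistent
 * table.
 */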
struct file_operations;
struct vfsmount;
struct dentry;

#define rcu_dereference_check_fdtable(files, fdtfd) \
	rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))

#define files_fdtable(files) \
	rcu_dereference_check_fdtable((files), (files)->fdt)
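/*
 * Usage sketch (illustrative only; assumes the caller already owns a
 * reference to @files): checking per-descriptor state by hand means taking
 * ->file_lock (or rcu_read_lock()), grabbing the table, and then using the
 * bit helpers above:
 *
 *	spin_lock(&files->file_lock);
 *	fdt = files_fdtable(files);
 *	if (fd < fdt->max_fds && fd_is_open(fd, fdt))
 *		cloexec = close_on_exec(fd, fdt);
 *	spin_unlock(&files->file_lock);
 */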
/*
 * The caller must either ensure that the fd table isn't shared, or hold
 * the RCU read lock or files->file_lock.
 */
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
	struct file *needs_masking;

	/*
	 * 'mask' is zero for an out-of-bounds fd, all ones for ok.
	 * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
	 *
	 * Accessing fdt->fd[0] is ok, but needs masking of the result.
	 * The nospec mask also keeps a speculatively executed out-of-bounds
	 * access from reading past the array (Spectre v1).
	 */
	needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
	return (struct file *)(mask & (unsigned long)needs_masking);
}
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
{
	RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock),
			 "suspicious rcu_dereference_check() usage");
	return files_lookup_fd_raw(files, fd);
}
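/*
 * Usage note (descriptive, not new API): the pointer returned by either
 * lookup above is not reference-counted, so it is only trustworthy while
 * whatever protects the table is still in force -- the table is unshared,
 * ->file_lock is held, or the access stays inside one RCU read-side
 * critical section.  Callers that need to keep the file around should use
 * lookup_fdget_rcu() declared below, which also acquires a reference.
 * A minimal locked sketch:
 *
 *	spin_lock(&files->file_lock);
 *	file = files_lookup_fd_locked(files, fd);
 *	if (file)
 *		get_file(file);
 *	spin_unlock(&files->file_lock);
 */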
struct task_struct;

struct file *lookup_fdget_rcu(unsigned int fd);
struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);

void put_files_struct(struct files_struct *fs);
int unshare_files(void);
struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
	       int (*)(const void *, struct file *, unsigned),
	       const void *);
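/*
 * Usage sketch for iterate_fd() (illustrative; match_file is a made-up
 * callback name): the callback is invoked under ->file_lock for every
 * installed descriptor starting at @n, and iteration stops at the first
 * non-zero return value, which iterate_fd() then passes back.
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	int res = iterate_fd(files, 0, match_file, filp);
 */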
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
		      struct files_struct **new_fdp);

extern struct kmem_cache *files_cachep;
#endif /* __LINUX_FDTABLE_H */