Mirror of https://github.com/fail0verflow/switch-linux.git, synced 2025-05-04 02:34:21 -04:00
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 1 from Al Viro:
 "Unfortunately, this merge window it'll have to be a lot of small piles -
  my fault, actually, for not keeping #for-next in anything that would
  resemble a sane shape ;-/

  This pile: assorted fixes (the first 3 are -stable fodder, IMO) and
  cleanups + %pd/%pD formats (dentry/file pathname, up to 4 last
  components) + several long-standing patches from various folks.

  There definitely will be a lot more (starting with Miklos'
  check_submount_and_drop() series)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (26 commits)
  direct-io: Handle O_(D)SYNC AIO
  direct-io: Implement generic deferred AIO completions
  add formats for dentry/file pathnames
  kvm eventfd: switch to fdget
  powerpc kvm: use fdget
  switch fchmod() to fdget
  switch epoll_ctl() to fdget
  switch copy_module_from_fd() to fdget
  git simplify nilfs check for busy subtree
  ibmasmfs: don't bother passing superblock when not needed
  don't pass superblock to hypfs_{mkdir,create*}
  don't pass superblock to hypfs_diag_create_files
  don't pass superblock to hypfs_vm_create_files()
  oprofile: get rid of pointless forward declarations of struct super_block
  oprofilefs_create_...() do not need superblock argument
  oprofilefs_mkdir() doesn't need superblock argument
  don't bother with passing superblock to oprofile_create_stats_files()
  oprofile: don't bother with passing superblock to ->create_files()
  don't bother passing sb to oprofile_create_files()
  coh901318: don't open-code simple_read_from_buffer()
  ...
commit 45d9a2220f
52 changed files with 737 additions and 501 deletions
@@ -168,6 +168,15 @@ UUID/GUID addresses:
 	Where no additional specifiers are used the default little endian
 	order with lower case hex characters will be printed.
 
+dentry names:
+	%pd{,2,3,4}
+	%pD{,2,3,4}
+
+	For printing dentry name; if we race with d_move(), the name might be
+	a mix of old and new ones, but it won't oops.  %pd dentry is a safer
+	equivalent of %s dentry->d_name.name we used to use, %pd<n> prints
+	n last components.  %pD does the same thing for struct file.
+
 struct va_format:
 
 	%pV
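A minimal sketch of how the new specifiers documented above are used from C code; the helper and the messages are invented here purely for illustration and are not part of this commit:

	#include <linux/fs.h>
	#include <linux/printk.h>

	/* Hypothetical helper demonstrating the %pd/%pD printk extensions. */
	static void example_log_open(struct file *filp)
	{
		/* last component of the dentry's name */
		pr_info("opened dentry %pd\n", filp->f_path.dentry);
		/* up to the 3 last path components, taken from a struct file */
		pr_info("opened file %pD3\n", filp);
	}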
@@ -106,7 +106,7 @@ op_axp_stop(void)
 }
 
 static int
-op_axp_create_files(struct super_block *sb, struct dentry *root)
+op_axp_create_files(struct dentry *root)
 {
 	int i;
 
@@ -115,23 +115,23 @@ op_axp_create_files(struct super_block *sb, struct dentry *root)
 		char buf[4];
 
 		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+		dir = oprofilefs_mkdir(root, buf);
 
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
 		/* Dummies. */
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
 	}
 
 	if (model->can_set_proc_mode) {
-		oprofilefs_create_ulong(sb, root, "enable_pal",
+		oprofilefs_create_ulong(root, "enable_pal",
 					&sys.enable_pal);
-		oprofilefs_create_ulong(sb, root, "enable_kernel",
+		oprofilefs_create_ulong(root, "enable_kernel",
 					&sys.enable_kernel);
-		oprofilefs_create_ulong(sb, root, "enable_user",
+		oprofilefs_create_ulong(root, "enable_user",
 					&sys.enable_user);
 	}
 
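The remaining oprofile arch conversions below all follow the same pattern: the oprofilefs helpers now locate the superblock themselves, so callers pass only the parent dentry. A minimal sketch of a create_files() callback written against the new signatures (the "example" directory and counter variable are invented for illustration):

	static unsigned long example_enabled;

	/* Hypothetical callback using the superblock-free oprofilefs helpers. */
	static int example_create_files(struct dentry *root)
	{
		struct dentry *dir = oprofilefs_mkdir(root, "example");

		if (!dir)
			return -EINVAL;
		oprofilefs_create_ulong(dir, "enabled", &example_enabled);
		return 0;
	}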
@@ -97,8 +97,7 @@ static irqreturn_t avr32_perf_counter_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int avr32_perf_counter_create_files(struct super_block *sb,
-		struct dentry *root)
+static int avr32_perf_counter_create_files(struct dentry *root)
 {
 	struct dentry *dir;
 	unsigned int i;
@@ -106,21 +105,21 @@ static int avr32_perf_counter_create_files(struct super_block *sb,
 
 	for (i = 0; i < NR_counter; i++) {
 		snprintf(filename, sizeof(filename), "%u", i);
-		dir = oprofilefs_mkdir(sb, root, filename);
+		dir = oprofilefs_mkdir(root, filename);
 
-		oprofilefs_create_ulong(sb, dir, "enabled",
+		oprofilefs_create_ulong(dir, "enabled",
 				&counter[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event",
+		oprofilefs_create_ulong(dir, "event",
 				&counter[i].event);
-		oprofilefs_create_ulong(sb, dir, "count",
+		oprofilefs_create_ulong(dir, "count",
 				&counter[i].count);
 
 		/* Dummy entries */
-		oprofilefs_create_ulong(sb, dir, "kernel",
+		oprofilefs_create_ulong(dir, "kernel",
 				&counter[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user",
+		oprofilefs_create_ulong(dir, "user",
 				&counter[i].user);
-		oprofilefs_create_ulong(sb, dir, "unit_mask",
+		oprofilefs_create_ulong(dir, "unit_mask",
 				&counter[i].unit_mask);
 	}
 
@@ -33,7 +33,7 @@ static int op_mips_setup(void)
 	return 0;
 }
 
-static int op_mips_create_files(struct super_block *sb, struct dentry *root)
+static int op_mips_create_files(struct dentry *root)
 {
 	int i;
 
@@ -42,16 +42,16 @@ static int op_mips_create_files(struct super_block *sb, struct dentry *root)
 		char buf[4];
 
 		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+		dir = oprofilefs_mkdir(root, buf);
 
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-		oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl);
+		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
+		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+		oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
 		/* Dummy. */
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
 	}
 
 	return 0;
@@ -825,39 +825,39 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 #endif
 #ifdef CONFIG_KVM_MPIC
 	case KVM_CAP_IRQ_MPIC: {
-		struct file *filp;
+		struct fd f;
 		struct kvm_device *dev;
 
 		r = -EBADF;
-		filp = fget(cap->args[0]);
-		if (!filp)
+		f = fdget(cap->args[0]);
+		if (!f.file)
 			break;
 
 		r = -EPERM;
-		dev = kvm_device_from_filp(filp);
+		dev = kvm_device_from_filp(f.file);
 		if (dev)
 			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
 
-		fput(filp);
+		fdput(f);
 		break;
 	}
 #endif
 #ifdef CONFIG_KVM_XICS
 	case KVM_CAP_IRQ_XICS: {
-		struct file *filp;
+		struct fd f;
 		struct kvm_device *dev;
 
 		r = -EBADF;
-		filp = fget(cap->args[0]);
-		if (!filp)
+		f = fdget(cap->args[0]);
+		if (!f.file)
 			break;
 
 		r = -EPERM;
-		dev = kvm_device_from_filp(filp);
+		dev = kvm_device_from_filp(f.file);
 		if (dev)
 			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
 
-		fput(filp);
+		fdput(f);
 		break;
 	}
 #endif /* CONFIG_KVM_XICS */
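The fget()/fput() pairs above become fdget()/fdput(): fdget() can avoid the reference-count bump when the file descriptor table is not shared, and fdput() undoes exactly what the matching fdget() did. A minimal sketch of the pattern outside of KVM (the function itself is hypothetical, for illustration only):

	#include <linux/file.h>

	static int example_use_fd(unsigned int fd)
	{
		struct fd f = fdget(fd);

		if (!f.file)
			return -EBADF;
		/* ... operate on f.file while the reference is held ... */
		fdput(f);
		return 0;
	}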
@@ -119,7 +119,7 @@ static void op_powerpc_stop(void)
 		model->global_stop();
 }
 
-static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct dentry *root)
 {
 	int i;
 
@@ -128,9 +128,9 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 	 * There is one mmcr0, mmcr1 and mmcra for setting the events for
 	 * all of the counters.
 	 */
-	oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
-	oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
-	oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+	oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
+	oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
+	oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
 #ifdef CONFIG_OPROFILE_CELL
 	/* create a file the user tool can check to see what level of profiling
 	 * support exits with this kernel. Initialize bit mask to indicate
@@ -142,7 +142,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 	 * If the file does not exist, then the kernel only supports SPU
 	 * cycle profiling, PPU event and cycle profiling.
 	 */
-	oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+	oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
 	sys.cell_support = 0x1; /* Note, the user OProfile tool must check
 				 * that this bit is set before attempting to
 				 * user SPU event profiling. Older kernels
@@ -160,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 		char buf[4];
 
 		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+		dir = oprofilefs_mkdir(root, buf);
 
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
 
 		/*
 		 * Classic PowerPC doesn't support per-counter
@@ -173,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 		 * Book-E style performance monitors, we do
 		 * support them.
 		 */
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
 
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
 	}
 
-	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
-	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+	oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
+	oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
 
 	/* Default to tracing both kernel and user */
 	sys.enable_kernel = 1;
@@ -18,26 +18,23 @@
 #define UPDATE_FILE_MODE 0220
 #define DIR_MODE 0550
 
-extern struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
-				  const char *name);
+extern struct dentry *hypfs_mkdir(struct dentry *parent, const char *name);
 
-extern struct dentry *hypfs_create_u64(struct super_block *sb,
-				       struct dentry *dir, const char *name,
+extern struct dentry *hypfs_create_u64(struct dentry *dir, const char *name,
 				       __u64 value);
 
-extern struct dentry *hypfs_create_str(struct super_block *sb,
-				       struct dentry *dir, const char *name,
+extern struct dentry *hypfs_create_str(struct dentry *dir, const char *name,
 				       char *string);
 
 /* LPAR Hypervisor */
 extern int hypfs_diag_init(void);
 extern void hypfs_diag_exit(void);
-extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
+extern int hypfs_diag_create_files(struct dentry *root);
 
 /* VM Hypervisor */
 extern int hypfs_vm_init(void);
 extern void hypfs_vm_exit(void);
-extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
+extern int hypfs_vm_create_files(struct dentry *root);
 
 /* debugfs interface */
 struct hypfs_dbfs_file;
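The superblock argument can be dropped from all of these helpers because a dentry already records which superblock it belongs to. A minimal sketch of the idea (the helper below is hypothetical and not part of hypfs):

	#include <linux/dcache.h>
	#include <linux/fs.h>

	static struct inode *example_new_inode(struct dentry *parent, umode_t mode)
	{
		/* the parent dentry already knows its superblock */
		struct inode *inode = new_inode(parent->d_sb);

		if (inode)
			inode->i_mode = mode;
		return inode;
	}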
@@ -623,8 +623,7 @@ void hypfs_diag_exit(void)
  * *******************************************
  */
 
-static int hypfs_create_cpu_files(struct super_block *sb,
-				  struct dentry *cpus_dir, void *cpu_info)
+static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
 {
 	struct dentry *cpu_dir;
 	char buffer[TMP_SIZE];
@@ -632,30 +631,29 @@ static int hypfs_create_cpu_files(struct super_block *sb,
 
 	snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
 							    cpu_info));
-	cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
-	rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+	cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+	rc = hypfs_create_u64(cpu_dir, "mgmtime",
 			      cpu_info__acc_time(diag204_info_type, cpu_info) -
 			      cpu_info__lp_time(diag204_info_type, cpu_info));
 	if (IS_ERR(rc))
 		return PTR_ERR(rc);
-	rc = hypfs_create_u64(sb, cpu_dir, "cputime",
+	rc = hypfs_create_u64(cpu_dir, "cputime",
 			      cpu_info__lp_time(diag204_info_type, cpu_info));
 	if (IS_ERR(rc))
 		return PTR_ERR(rc);
 	if (diag204_info_type == INFO_EXT) {
-		rc = hypfs_create_u64(sb, cpu_dir, "onlinetime",
+		rc = hypfs_create_u64(cpu_dir, "onlinetime",
 				      cpu_info__online_time(diag204_info_type,
 							    cpu_info));
 		if (IS_ERR(rc))
 			return PTR_ERR(rc);
 	}
 	diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
-	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+	rc = hypfs_create_str(cpu_dir, "type", buffer);
 	return PTR_RET(rc);
 }
 
-static void *hypfs_create_lpar_files(struct super_block *sb,
-				     struct dentry *systems_dir, void *part_hdr)
+static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
 {
 	struct dentry *cpus_dir;
 	struct dentry *lpar_dir;
@@ -665,16 +663,16 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
 
 	part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
 	lpar_name[LPAR_NAME_LEN] = 0;
-	lpar_dir = hypfs_mkdir(sb, systems_dir, lpar_name);
+	lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
 	if (IS_ERR(lpar_dir))
 		return lpar_dir;
-	cpus_dir = hypfs_mkdir(sb, lpar_dir, "cpus");
+	cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
 	if (IS_ERR(cpus_dir))
 		return cpus_dir;
 	cpu_info = part_hdr + part_hdr__size(diag204_info_type);
 	for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
 		int rc;
-		rc = hypfs_create_cpu_files(sb, cpus_dir, cpu_info);
+		rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
 		if (rc)
 			return ERR_PTR(rc);
 		cpu_info += cpu_info__size(diag204_info_type);
@@ -682,8 +680,7 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
 	return cpu_info;
 }
 
-static int hypfs_create_phys_cpu_files(struct super_block *sb,
-				       struct dentry *cpus_dir, void *cpu_info)
+static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
 {
 	struct dentry *cpu_dir;
 	char buffer[TMP_SIZE];
@@ -691,32 +688,31 @@ static int hypfs_create_phys_cpu_files(struct super_block *sb,
 
 	snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
 							    cpu_info));
-	cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+	cpu_dir = hypfs_mkdir(cpus_dir, buffer);
 	if (IS_ERR(cpu_dir))
 		return PTR_ERR(cpu_dir);
-	rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+	rc = hypfs_create_u64(cpu_dir, "mgmtime",
 			      phys_cpu__mgm_time(diag204_info_type, cpu_info));
 	if (IS_ERR(rc))
 		return PTR_ERR(rc);
 	diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
-	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+	rc = hypfs_create_str(cpu_dir, "type", buffer);
 	return PTR_RET(rc);
 }
 
-static void *hypfs_create_phys_files(struct super_block *sb,
-				     struct dentry *parent_dir, void *phys_hdr)
+static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
 {
 	int i;
 	void *cpu_info;
 	struct dentry *cpus_dir;
 
-	cpus_dir = hypfs_mkdir(sb, parent_dir, "cpus");
+	cpus_dir = hypfs_mkdir(parent_dir, "cpus");
 	if (IS_ERR(cpus_dir))
 		return cpus_dir;
 	cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
 	for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
 		int rc;
-		rc = hypfs_create_phys_cpu_files(sb, cpus_dir, cpu_info);
+		rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
 		if (rc)
 			return ERR_PTR(rc);
 		cpu_info += phys_cpu__size(diag204_info_type);
@@ -724,7 +720,7 @@ static void *hypfs_create_phys_files(struct super_block *sb,
 	return cpu_info;
 }
 
-int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
+int hypfs_diag_create_files(struct dentry *root)
 {
 	struct dentry *systems_dir, *hyp_dir;
 	void *time_hdr, *part_hdr;
@@ -735,7 +731,7 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	systems_dir = hypfs_mkdir(sb, root, "systems");
+	systems_dir = hypfs_mkdir(root, "systems");
 	if (IS_ERR(systems_dir)) {
 		rc = PTR_ERR(systems_dir);
 		goto err_out;
@@ -743,25 +739,25 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
 	time_hdr = (struct x_info_blk_hdr *)buffer;
 	part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
 	for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
-		part_hdr = hypfs_create_lpar_files(sb, systems_dir, part_hdr);
+		part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
 		if (IS_ERR(part_hdr)) {
 			rc = PTR_ERR(part_hdr);
 			goto err_out;
 		}
 	}
 	if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
-		ptr = hypfs_create_phys_files(sb, root, part_hdr);
+		ptr = hypfs_create_phys_files(root, part_hdr);
 		if (IS_ERR(ptr)) {
 			rc = PTR_ERR(ptr);
 			goto err_out;
 		}
 	}
-	hyp_dir = hypfs_mkdir(sb, root, "hyp");
+	hyp_dir = hypfs_mkdir(root, "hyp");
 	if (IS_ERR(hyp_dir)) {
 		rc = PTR_ERR(hyp_dir);
 		goto err_out;
 	}
-	ptr = hypfs_create_str(sb, hyp_dir, "type", "LPAR Hypervisor");
+	ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
 	if (IS_ERR(ptr)) {
 		rc = PTR_ERR(ptr);
 		goto err_out;
@@ -107,16 +107,15 @@ static void diag2fc_free(const void *data)
 	vfree(data);
 }
 
-#define ATTRIBUTE(sb, dir, name, member) \
+#define ATTRIBUTE(dir, name, member) \
 do { \
 	void *rc; \
-	rc = hypfs_create_u64(sb, dir, name, member); \
+	rc = hypfs_create_u64(dir, name, member); \
 	if (IS_ERR(rc)) \
 		return PTR_ERR(rc); \
 } while(0)
 
-static int hpyfs_vm_create_guest(struct super_block *sb,
-				 struct dentry *systems_dir,
+static int hpyfs_vm_create_guest(struct dentry *systems_dir,
 				 struct diag2fc_data *data)
 {
 	char guest_name[NAME_LEN + 1] = {};
@@ -130,46 +129,46 @@ static int hpyfs_vm_create_guest(struct super_block *sb,
 	memcpy(guest_name, data->guest_name, NAME_LEN);
 	EBCASC(guest_name, NAME_LEN);
 	strim(guest_name);
-	guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
+	guest_dir = hypfs_mkdir(systems_dir, guest_name);
 	if (IS_ERR(guest_dir))
 		return PTR_ERR(guest_dir);
-	ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
+	ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
 
 	/* logical cpu information */
-	cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
+	cpus_dir = hypfs_mkdir(guest_dir, "cpus");
 	if (IS_ERR(cpus_dir))
 		return PTR_ERR(cpus_dir);
-	ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
-	ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
-	ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
-	ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
-	ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
-	ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
-	ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
+	ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
+	ATTRIBUTE(cpus_dir, "capped", capped_value);
+	ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
+	ATTRIBUTE(cpus_dir, "count", data->vcpus);
+	ATTRIBUTE(cpus_dir, "weight_min", data->cpu_min);
+	ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
+	ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
 
 	/* memory information */
-	mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
+	mem_dir = hypfs_mkdir(guest_dir, "mem");
 	if (IS_ERR(mem_dir))
 		return PTR_ERR(mem_dir);
-	ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
-	ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
-	ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
-	ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
+	ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
+	ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
+	ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
+	ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
 
 	/* samples */
-	samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
+	samples_dir = hypfs_mkdir(guest_dir, "samples");
 	if (IS_ERR(samples_dir))
 		return PTR_ERR(samples_dir);
-	ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
-	ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
-	ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
-	ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
-	ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
-	ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
+	ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
+	ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
+	ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
+	ATTRIBUTE(samples_dir, "idle", data->idle_samp);
+	ATTRIBUTE(samples_dir, "other", data->other_samp);
+	ATTRIBUTE(samples_dir, "total", data->total_samp);
 	return 0;
 }
 
-int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
+int hypfs_vm_create_files(struct dentry *root)
 {
 	struct dentry *dir, *file;
 	struct diag2fc_data *data;
@@ -181,38 +180,38 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
 		return PTR_ERR(data);
 
 	/* Hpervisor Info */
-	dir = hypfs_mkdir(sb, root, "hyp");
+	dir = hypfs_mkdir(root, "hyp");
 	if (IS_ERR(dir)) {
 		rc = PTR_ERR(dir);
 		goto failed;
 	}
-	file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
+	file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
 	if (IS_ERR(file)) {
 		rc = PTR_ERR(file);
 		goto failed;
 	}
 
 	/* physical cpus */
-	dir = hypfs_mkdir(sb, root, "cpus");
+	dir = hypfs_mkdir(root, "cpus");
 	if (IS_ERR(dir)) {
 		rc = PTR_ERR(dir);
 		goto failed;
 	}
-	file = hypfs_create_u64(sb, dir, "count", data->lcpus);
+	file = hypfs_create_u64(dir, "count", data->lcpus);
 	if (IS_ERR(file)) {
 		rc = PTR_ERR(file);
 		goto failed;
 	}
 
 	/* guests */
-	dir = hypfs_mkdir(sb, root, "systems");
+	dir = hypfs_mkdir(root, "systems");
 	if (IS_ERR(dir)) {
 		rc = PTR_ERR(dir);
 		goto failed;
 	}
 
 	for (i = 0; i < count; i++) {
-		rc = hpyfs_vm_create_guest(sb, dir, &(data[i]));
+		rc = hpyfs_vm_create_guest(dir, &(data[i]));
 		if (rc)
 			goto failed;
 	}
@@ -28,8 +28,7 @@
 #define HYPFS_MAGIC 0x687970	/* ASCII 'hyp' */
 #define TMP_SIZE 64		/* size of temporary buffers */
 
-static struct dentry *hypfs_create_update_file(struct super_block *sb,
-					       struct dentry *dir);
+static struct dentry *hypfs_create_update_file(struct dentry *dir);
 
 struct hypfs_sb_info {
 	kuid_t uid;			/* uid used for files and dirs */
@@ -193,9 +192,9 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	}
 	hypfs_delete_tree(sb->s_root);
 	if (MACHINE_IS_VM)
-		rc = hypfs_vm_create_files(sb, sb->s_root);
+		rc = hypfs_vm_create_files(sb->s_root);
 	else
-		rc = hypfs_diag_create_files(sb, sb->s_root);
+		rc = hypfs_diag_create_files(sb->s_root);
 	if (rc) {
 		pr_err("Updating the hypfs tree failed\n");
 		hypfs_delete_tree(sb->s_root);
@@ -302,12 +301,12 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!root_dentry)
 		return -ENOMEM;
 	if (MACHINE_IS_VM)
-		rc = hypfs_vm_create_files(sb, root_dentry);
+		rc = hypfs_vm_create_files(root_dentry);
 	else
-		rc = hypfs_diag_create_files(sb, root_dentry);
+		rc = hypfs_diag_create_files(root_dentry);
 	if (rc)
 		return rc;
-	sbi->update_file = hypfs_create_update_file(sb, root_dentry);
+	sbi->update_file = hypfs_create_update_file(root_dentry);
 	if (IS_ERR(sbi->update_file))
 		return PTR_ERR(sbi->update_file);
 	hypfs_update_update(sb);
@@ -334,8 +333,7 @@ static void hypfs_kill_super(struct super_block *sb)
 	kill_litter_super(sb);
 }
 
-static struct dentry *hypfs_create_file(struct super_block *sb,
-					struct dentry *parent, const char *name,
+static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
 					char *data, umode_t mode)
 {
 	struct dentry *dentry;
@@ -347,7 +345,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
 		dentry = ERR_PTR(-ENOMEM);
 		goto fail;
 	}
-	inode = hypfs_make_inode(sb, mode);
+	inode = hypfs_make_inode(parent->d_sb, mode);
 	if (!inode) {
 		dput(dentry);
 		dentry = ERR_PTR(-ENOMEM);
@@ -373,24 +371,22 @@ fail:
 	return dentry;
 }
 
-struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
-			   const char *name)
+struct dentry *hypfs_mkdir(struct dentry *parent, const char *name)
 {
 	struct dentry *dentry;
 
-	dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE);
+	dentry = hypfs_create_file(parent, name, NULL, S_IFDIR | DIR_MODE);
 	if (IS_ERR(dentry))
 		return dentry;
 	hypfs_add_dentry(dentry);
 	return dentry;
 }
 
-static struct dentry *hypfs_create_update_file(struct super_block *sb,
-					       struct dentry *dir)
+static struct dentry *hypfs_create_update_file(struct dentry *dir)
 {
 	struct dentry *dentry;
 
-	dentry = hypfs_create_file(sb, dir, "update", NULL,
+	dentry = hypfs_create_file(dir, "update", NULL,
 				   S_IFREG | UPDATE_FILE_MODE);
 	/*
 	 * We do not put the update file on the 'delete' list with
@@ -400,7 +396,7 @@ static struct dentry *hypfs_create_update_file(struct super_block *sb,
 	return dentry;
 }
 
-struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
+struct dentry *hypfs_create_u64(struct dentry *dir,
 				const char *name, __u64 value)
 {
 	char *buffer;
@@ -412,7 +408,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
 	if (!buffer)
 		return ERR_PTR(-ENOMEM);
 	dentry =
-		hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+		hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
 	if (IS_ERR(dentry)) {
 		kfree(buffer);
 		return ERR_PTR(-ENOMEM);
@@ -421,7 +417,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
 	return dentry;
 }
 
-struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
+struct dentry *hypfs_create_str(struct dentry *dir,
 				const char *name, char *string)
 {
 	char *buffer;
@@ -432,7 +428,7 @@ struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
 		return ERR_PTR(-ENOMEM);
 	sprintf(buffer, "%s\n", string);
 	dentry =
-		hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+		hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
 	if (IS_ERR(dentry)) {
 		kfree(buffer);
 		return ERR_PTR(-ENOMEM);
@@ -346,16 +346,15 @@ static const struct file_operations timer_enabled_fops = {
 };
 
 
-static int oprofile_create_hwsampling_files(struct super_block *sb,
-					    struct dentry *root)
+static int oprofile_create_hwsampling_files(struct dentry *root)
 {
 	struct dentry *dir;
 
-	dir = oprofilefs_mkdir(sb, root, "timer");
+	dir = oprofilefs_mkdir(root, "timer");
 	if (!dir)
 		return -EINVAL;
 
-	oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);
+	oprofilefs_create_file(dir, "enabled", &timer_enabled_fops);
 
 	if (!hwsampler_available)
 		return 0;
@@ -376,17 +375,17 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
 		 * and can only be set to 0.
 		 */
 
-		dir = oprofilefs_mkdir(sb, root, "0");
+		dir = oprofilefs_mkdir(root, "0");
 		if (!dir)
 			return -EINVAL;
 
-		oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
-		oprofilefs_create_file(sb, dir, "event", &zero_fops);
-		oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
-		oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
-		oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
-		oprofilefs_create_file(sb, dir, "user", &user_fops);
-		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+		oprofilefs_create_file(dir, "enabled", &hwsampler_fops);
+		oprofilefs_create_file(dir, "event", &zero_fops);
+		oprofilefs_create_file(dir, "count", &hw_interval_fops);
+		oprofilefs_create_file(dir, "unit_mask", &zero_fops);
+		oprofilefs_create_file(dir, "kernel", &kernel_fops);
+		oprofilefs_create_file(dir, "user", &user_fops);
+		oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
 					&oprofile_sdbt_blocks);
 
 	} else {
@@ -396,19 +395,19 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
 		 * space tools.  The /dev/oprofile/hwsampling fs is
 		 * provided in that case.
 		 */
-		dir = oprofilefs_mkdir(sb, root, "hwsampling");
+		dir = oprofilefs_mkdir(root, "hwsampling");
 		if (!dir)
 			return -EINVAL;
 
-		oprofilefs_create_file(sb, dir, "hwsampler",
+		oprofilefs_create_file(dir, "hwsampler",
 				       &hwsampler_fops);
-		oprofilefs_create_file(sb, dir, "hw_interval",
+		oprofilefs_create_file(dir, "hw_interval",
 				       &hw_interval_fops);
-		oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
+		oprofilefs_create_ro_ulong(dir, "hw_min_interval",
 					   &oprofile_min_interval);
-		oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
+		oprofilefs_create_ro_ulong(dir, "hw_max_interval",
 					   &oprofile_max_interval);
-		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+		oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
 					&oprofile_sdbt_blocks);
 	}
 	return 0;
@@ -403,7 +403,7 @@ static void nmi_cpu_down(void *dummy)
 	nmi_cpu_shutdown(dummy);
 }
 
-static int nmi_create_files(struct super_block *sb, struct dentry *root)
+static int nmi_create_files(struct dentry *root)
 {
 	unsigned int i;
 
@@ -420,14 +420,14 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 			continue;
 
 		snprintf(buf, sizeof(buf), "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-		oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
+		dir = oprofilefs_mkdir(root, buf);
+		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
+		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
 	}
 
 	return 0;
@@ -454,16 +454,16 @@ static void init_ibs(void)
 	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 }
 
-static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
+static int (*create_arch_files)(struct dentry *root);
 
-static int setup_ibs_files(struct super_block *sb, struct dentry *root)
+static int setup_ibs_files(struct dentry *root)
 {
 	struct dentry *dir;
 	int ret = 0;
 
 	/* architecture specific files */
 	if (create_arch_files)
-		ret = create_arch_files(sb, root);
+		ret = create_arch_files(root);
 
 	if (ret)
 		return ret;
@@ -479,26 +479,26 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 	ibs_config.max_cnt_op = 250000;
 
 	if (ibs_caps & IBS_CAPS_FETCHSAM) {
-		dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
-		oprofilefs_create_ulong(sb, dir, "enable",
+		dir = oprofilefs_mkdir(root, "ibs_fetch");
+		oprofilefs_create_ulong(dir, "enable",
 					&ibs_config.fetch_enabled);
-		oprofilefs_create_ulong(sb, dir, "max_count",
+		oprofilefs_create_ulong(dir, "max_count",
 					&ibs_config.max_cnt_fetch);
-		oprofilefs_create_ulong(sb, dir, "rand_enable",
+		oprofilefs_create_ulong(dir, "rand_enable",
 					&ibs_config.rand_en);
 	}
 
 	if (ibs_caps & IBS_CAPS_OPSAM) {
-		dir = oprofilefs_mkdir(sb, root, "ibs_op");
-		oprofilefs_create_ulong(sb, dir, "enable",
+		dir = oprofilefs_mkdir(root, "ibs_op");
+		oprofilefs_create_ulong(dir, "enable",
 					&ibs_config.op_enabled);
-		oprofilefs_create_ulong(sb, dir, "max_count",
+		oprofilefs_create_ulong(dir, "max_count",
 					&ibs_config.max_cnt_op);
 		if (ibs_caps & IBS_CAPS_OPCNT)
-			oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+			oprofilefs_create_ulong(dir, "dispatched_ops",
 						&ibs_config.dispatched_ops);
 		if (ibs_caps & IBS_CAPS_BRNTRGT)
-			oprofilefs_create_ulong(sb, dir, "branch_target",
+			oprofilefs_create_ulong(dir, "branch_target",
 						&ibs_config.branch_target);
 	}
 
@@ -1339,15 +1339,14 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 {
 	u64 started_channels = debugfs_dma_base->pm.started_channels;
 	int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
-	int i;
-	int ret = 0;
 	char *dev_buf;
 	char *tmp;
-	int dev_size;
+	int ret;
+	int i;
 
 	dev_buf = kmalloc(4*1024, GFP_KERNEL);
 	if (dev_buf == NULL)
-		goto err_kmalloc;
+		return -ENOMEM;
 	tmp = dev_buf;
 
 	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
@@ -1357,26 +1356,11 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 			tmp += sprintf(tmp, "channel %d\n", i);
 
 	tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
-	dev_size = tmp - dev_buf;
 
-	/* No more to read if offset != 0 */
-	if (*f_pos > dev_size)
-		goto out;
-
-	if (count > dev_size - *f_pos)
-		count = dev_size - *f_pos;
-
-	if (copy_to_user(buf, dev_buf + *f_pos, count))
-		ret = -EINVAL;
-	ret = count;
-	*f_pos += count;
-
- out:
+	ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
+				      tmp - dev_buf);
 	kfree(dev_buf);
 	return ret;
-
- err_kmalloc:
-	return 0;
 }
 
 static const struct file_operations coh901318_debugfs_status_operations = {
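The open-coded offset checks and copy_to_user() above are replaced by simple_read_from_buffer(), which bounds-checks *ppos, copies at most count bytes to user space, advances *ppos and returns the number of bytes copied. A minimal sketch of a read() method built on it (the function and message are invented for illustration):

	#include <linux/fs.h>

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		static const char msg[] = "hello from the kernel\n";

		return simple_read_from_buffer(buf, count, ppos, msg,
					       sizeof(msg) - 1);
	}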
@@ -149,8 +149,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
 	return ret;
 }
 
-static struct dentry *ibmasmfs_create_file (struct super_block *sb,
-			struct dentry *parent,
+static struct dentry *ibmasmfs_create_file(struct dentry *parent,
 			const char *name,
 			const struct file_operations *fops,
 			void *data,
@@ -163,7 +162,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
 	if (!dentry)
 		return NULL;
 
-	inode = ibmasmfs_make_inode(sb, S_IFREG | mode);
+	inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode);
 	if (!inode) {
 		dput(dentry);
 		return NULL;
@@ -176,8 +175,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
 	return dentry;
 }
 
-static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
-				struct dentry *parent,
+static struct dentry *ibmasmfs_create_dir(struct dentry *parent,
 				const char *name)
 {
 	struct dentry *dentry;
@@ -187,7 +185,7 @@ static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
 	if (!dentry)
 		return NULL;
 
-	inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500);
+	inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500);
 	if (!inode) {
 		dput(dentry);
 		return NULL;
@@ -612,20 +610,20 @@ static void ibmasmfs_create_files (struct super_block *sb)
 		struct dentry *dir;
 		struct dentry *remote_dir;
 		sp = list_entry(entry, struct service_processor, node);
-		dir = ibmasmfs_create_dir(sb, sb->s_root, sp->dirname);
+		dir = ibmasmfs_create_dir(sb->s_root, sp->dirname);
 		if (!dir)
 			continue;
 
-		ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
-		ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
-		ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
 
-		remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video");
+		remote_dir = ibmasmfs_create_dir(dir, "remote_video");
 		if (!remote_dir)
 			continue;
 
-		ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
-		ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
-		ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
+		ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
 	}
 }
@@ -30,10 +30,9 @@ extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
 extern unsigned long oprofile_backtrace_depth;
 
-struct super_block;
 struct dentry;
 
-void oprofile_create_files(struct super_block *sb, struct dentry *root);
+void oprofile_create_files(struct dentry *root);
 int oprofile_timer_init(struct oprofile_operations *ops);
 #ifdef CONFIG_OPROFILE_NMI_TIMER
 int op_nmi_timer_init(struct oprofile_operations *ops);
@@ -175,7 +175,7 @@ static const struct file_operations dump_fops = {
 	.llseek		= noop_llseek,
 };
 
-void oprofile_create_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_files(struct dentry *root)
 {
 	/* reinitialize default values */
 	oprofile_buffer_size		= BUFFER_SIZE_DEFAULT;
@@ -183,19 +183,19 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
 	oprofile_buffer_watershed	= BUFFER_WATERSHED_DEFAULT;
 	oprofile_time_slice		= msecs_to_jiffies(TIME_SLICE_DEFAULT);
 
-	oprofilefs_create_file(sb, root, "enable", &enable_fops);
-	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
-	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
-	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
-	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
-	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+	oprofilefs_create_file(root, "enable", &enable_fops);
+	oprofilefs_create_file_perm(root, "dump", &dump_fops, 0666);
+	oprofilefs_create_file(root, "buffer", &event_buffer_fops);
+	oprofilefs_create_ulong(root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
+	oprofilefs_create_file(root, "cpu_type", &cpu_type_fops);
+	oprofilefs_create_file(root, "backtrace_depth", &depth_fops);
+	oprofilefs_create_file(root, "pointer_size", &pointer_size_fops);
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-	oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
+	oprofilefs_create_file(root, "time_slice", &timeout_fops);
 #endif
-	oprofile_create_stats_files(sb, root);
+	oprofile_create_stats_files(root);
 	if (oprofile_ops.create_files)
-		oprofile_ops.create_files(sb, root);
+		oprofile_ops.create_files(root);
 }
@@ -138,7 +138,7 @@ static void op_perf_stop(void)
 		op_destroy_counter(cpu, event);
 }
 
-static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+static int oprofile_perf_create_files(struct dentry *root)
 {
 	unsigned int i;
 
@@ -147,13 +147,13 @@ static int oprofile_perf_create_files(struct super_block *sb, struct dentry *roo
 		char buf[4];
 
 		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+		dir = oprofilefs_mkdir(root, buf);
+		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
 	}
 
 	return 0;
@@ -38,7 +38,7 @@ void oprofile_reset_stats(void)
 }
 
 
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_stats_files(struct dentry *root)
 {
 	struct oprofile_cpu_buffer *cpu_buf;
 	struct dentry *cpudir;
@@ -46,39 +46,39 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
 	char buf[10];
 	int i;
 
-	dir = oprofilefs_mkdir(sb, root, "stats");
+	dir = oprofilefs_mkdir(root, "stats");
 	if (!dir)
 		return;
 
 	for_each_possible_cpu(i) {
 		cpu_buf = &per_cpu(op_cpu_buffer, i);
 		snprintf(buf, 10, "cpu%d", i);
-		cpudir = oprofilefs_mkdir(sb, dir, buf);
+		cpudir = oprofilefs_mkdir(dir, buf);
 
 		/* Strictly speaking access to these ulongs is racy,
 		 * but we can't simply lock them, and they are
 		 * informational only.
 		 */
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+		oprofilefs_create_ro_ulong(cpudir, "sample_received",
 			&cpu_buf->sample_received);
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+		oprofilefs_create_ro_ulong(cpudir, "sample_lost_overflow",
 			&cpu_buf->sample_lost_overflow);
-		oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
+		oprofilefs_create_ro_ulong(cpudir, "backtrace_aborted",
 			&cpu_buf->backtrace_aborted);
-		oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
+		oprofilefs_create_ro_ulong(cpudir, "sample_invalid_eip",
 			&cpu_buf->sample_invalid_eip);
 	}
 
-	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
+	oprofilefs_create_ro_atomic(dir, "sample_lost_no_mm",
 		&oprofile_stats.sample_lost_no_mm);
-	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+	oprofilefs_create_ro_atomic(dir, "sample_lost_no_mapping",
 		&oprofile_stats.sample_lost_no_mapping);
-	oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+	oprofilefs_create_ro_atomic(dir, "event_lost_overflow",
 		&oprofile_stats.event_lost_overflow);
-	oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
+	oprofilefs_create_ro_atomic(dir, "bt_lost_no_mapping",
 		&oprofile_stats.bt_lost_no_mapping);
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-	oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
+	oprofilefs_create_ro_atomic(dir, "multiplex_counter",
 		&oprofile_stats.multiplex_counter);
 #endif
 }
@@ -25,10 +25,9 @@ extern struct oprofile_stat_struct oprofile_stats;
 /* reset all stats to zero */
 void oprofile_reset_stats(void);
 
-struct super_block;
 struct dentry;
 
 /* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
+void oprofile_create_stats_files(struct dentry *root);
 
 #endif /* OPROFILE_STATS_H */
@@ -132,9 +132,8 @@ static const struct file_operations ulong_ro_fops = {
 };
 
 
-static int __oprofilefs_create_file(struct super_block *sb,
-	struct dentry *root, char const *name, const struct file_operations *fops,
-	int perm, void *priv)
+static int __oprofilefs_create_file(struct dentry *root, char const *name,
+	const struct file_operations *fops, int perm, void *priv)
 {
 	struct dentry *dentry;
 	struct inode *inode;
@@ -145,7 +144,7 @@ static int __oprofilefs_create_file(struct super_block *sb,
 		mutex_unlock(&root->d_inode->i_mutex);
 		return -ENOMEM;
 	}
-	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
+	inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
 	if (!inode) {
 		dput(dentry);
 		mutex_unlock(&root->d_inode->i_mutex);
@@ -159,18 +158,18 @@ static int __oprofilefs_create_file(struct super_block *sb,
 }
 
 
-int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ulong(struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	return __oprofilefs_create_file(sb, root, name,
+	return __oprofilefs_create_file(root, name,
 					&ulong_fops, 0644, val);
 }
 
 
-int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_ulong(struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	return __oprofilefs_create_file(sb, root, name,
+	return __oprofilefs_create_file(root, name,
 					&ulong_ro_fops, 0444, val);
 }
 
@@ -189,50 +188,49 @@ static const struct file_operations atomic_ro_fops = {
 };
 
 
-int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_atomic(struct dentry *root,
 	char const *name, atomic_t *val)
 {
-	return __oprofilefs_create_file(sb, root, name,
+	return __oprofilefs_create_file(root, name,
 					&atomic_ro_fops, 0444, val);
 }
 
 
-int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file(struct dentry *root,
 	char const *name, const struct file_operations *fops)
 {
-	return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
+	return __oprofilefs_create_file(root, name, fops, 0644, NULL);
 }
 
 
-int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file_perm(struct dentry *root,
 	char const *name, const struct file_operations *fops, int perm)
 {
-	return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
+	return __oprofilefs_create_file(root, name, fops, perm, NULL);
 }
 
 
-struct dentry *oprofilefs_mkdir(struct super_block *sb,
-	struct dentry *root, char const *name)
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 
-	mutex_lock(&root->d_inode->i_mutex);
-	dentry = d_alloc_name(root, name);
+	mutex_lock(&parent->d_inode->i_mutex);
+	dentry = d_alloc_name(parent, name);
 	if (!dentry) {
-		mutex_unlock(&root->d_inode->i_mutex);
+		mutex_unlock(&parent->d_inode->i_mutex);
 		return NULL;
 	}
-	inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+	inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
 	if (!inode) {
 		dput(dentry);
-		mutex_unlock(&root->d_inode->i_mutex);
+		mutex_unlock(&parent->d_inode->i_mutex);
 		return NULL;
 	}
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	d_add(dentry, inode);
-	mutex_unlock(&root->d_inode->i_mutex);
+	mutex_unlock(&parent->d_inode->i_mutex);
 	return dentry;
 }
 
@@ -256,7 +254,7 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sb->s_root)
 		return -ENOMEM;
 
-	oprofile_create_files(sb, sb->s_root);
+	oprofile_create_files(sb->s_root);
 
 	// FIXME: verify kill_litter_super removes our dentries
 	return 0;
@@ -1519,7 +1519,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
-	if (ret > 0 || ret == -EIOCBQUEUED) {
+	if (ret > 0) {
 		ssize_t err;
 
 		err = generic_write_sync(file, pos, ret);
@@ -1727,7 +1727,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	 */
 	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
-	if (num_written > 0 || num_written == -EIOCBQUEUED) {
+	if (num_written > 0) {
 		err = generic_write_sync(file, pos, num_written);
 		if (err < 0 && num_written > 0)
 			num_written = err;
@@ -2553,7 +2553,7 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
 		mutex_unlock(&inode->i_mutex);
 	}
 
-	if (rc > 0 || rc == -EIOCBQUEUED) {
+	if (rc > 0) {
 		ssize_t err;
 
 		err = generic_write_sync(file, pos, rc);
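The three hunks above drop the -EIOCBQUEUED case around generic_write_sync() because, with the deferred AIO completions added later in this series, an O_(D)SYNC AIO write is synced from the direct-io completion path rather than by the submitter. A minimal user-space sketch of the kind of write affected — an O_DIRECT|O_DSYNC AIO write submitted with libaio — is shown below; the file name and sizes are arbitrary and the snippet is illustrative only, not part of the commit.

/* Illustrative only: O_DIRECT | O_DSYNC AIO write via libaio.
 * Build with: gcc -o aio_dsync aio_dsync.c -laio
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT | O_DSYNC, 0644);
	if (fd < 0 || io_setup(1, &ctx))
		return 1;

	/* O_DIRECT needs an aligned buffer; 4096 is a common block size. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);

	io_prep_pwrite(&cb, fd, buf, 4096, 0);
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;

	/* The completion event is only delivered once the data (and, for
	 * O_DSYNC, the required metadata) is stable on disk. */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return 1;
	printf("AIO write completed: res=%ld\n", (long)ev.res);

	io_destroy(ctx);
	close(fd);
	free(buf);
	return 0;
}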
fs/dcache.c (13 changed lines)
@@ -472,7 +472,7 @@ relock:
 		 * inform the fs via d_prune that this dentry is about to be
 		 * unhashed and destroyed.
 		 */
-		if (dentry->d_flags & DCACHE_OP_PRUNE)
+		if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
 			dentry->d_op->d_prune(dentry);
 
 		dentry_lru_del(dentry);
@@ -727,6 +727,14 @@ restart:
 	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!dentry->d_lockref.count) {
+			/*
+			 * inform the fs via d_prune that this dentry
+			 * is about to be unhashed and destroyed.
+			 */
+			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+			    !d_unhashed(dentry))
+				dentry->d_op->d_prune(dentry);
+
 			__dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
@@ -911,7 +919,8 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 		 * inform the fs that this dentry is about to be
 		 * unhashed and destroyed.
 		 */
-		if (dentry->d_flags & DCACHE_OP_PRUNE)
+		if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+		    !d_unhashed(dentry))
 			dentry->d_op->d_prune(dentry);
 
 		dentry_lru_del(dentry);
fs/direct-io.c (118 changed lines)
@@ -127,6 +127,7 @@ struct dio {
 	spinlock_t bio_lock;		/* protects BIO fields below */
 	int page_errors;		/* errno from get_user_pages() */
 	int is_async;			/* is IO async ? */
+	bool defer_completion;		/* defer AIO completion to workqueue? */
 	int io_error;			/* IO error in completion path */
 	unsigned long refcount;		/* direct_io_worker() and bios */
 	struct bio *bio_list;		/* singly linked via bi_private */
@@ -141,7 +142,10 @@ struct dio {
 	 * allocation time. Don't add new fields after pages[] unless you
 	 * wish that they not be zeroed.
 	 */
+	union {
 	struct page *pages[DIO_PAGES];	/* page buffer */
+		struct work_struct complete_work;/* deferred AIO completion */
+	};
 } ____cacheline_aligned_in_smp;
 
 static struct kmem_cache *dio_cache __read_mostly;
@@ -221,16 +225,16 @@ static inline struct page *dio_get_page(struct dio *dio,
  * dio_complete() - called when all DIO BIO I/O has been completed
  * @offset: the byte offset in the file of the completed operation
  *
- * This releases locks as dictated by the locking type, lets interested parties
- * know that a DIO operation has completed, and calculates the resulting return
- * code for the operation.
+ * This drops i_dio_count, lets interested parties know that a DIO operation
+ * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block. Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
+		bool is_async)
 {
 	ssize_t transferred = 0;
 
@@ -258,19 +262,36 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
 	if (ret == 0)
 		ret = transferred;
 
-	if (dio->end_io && dio->result) {
-		dio->end_io(dio->iocb, offset, transferred,
-			    dio->private, ret, is_async);
-	} else {
-		inode_dio_done(dio->inode);
-		if (is_async)
+	if (dio->end_io && dio->result)
+		dio->end_io(dio->iocb, offset, transferred, dio->private);
+
+	inode_dio_done(dio->inode);
+	if (is_async) {
+		if (dio->rw & WRITE) {
+			int err;
+
+			err = generic_write_sync(dio->iocb->ki_filp, offset,
+						 transferred);
+			if (err < 0 && ret > 0)
+				ret = err;
+		}
+
 		aio_complete(dio->iocb, ret, 0);
 	}
 
+	kmem_cache_free(dio_cache, dio);
 	return ret;
 }
 
+static void dio_aio_complete_work(struct work_struct *work)
+{
+	struct dio *dio = container_of(work, struct dio, complete_work);
+
+	dio_complete(dio, dio->iocb->ki_pos, 0, true);
+}
+
 static int dio_bio_complete(struct dio *dio, struct bio *bio);
 
 /*
 * Asynchronous IO callback.
 */
@@ -290,8 +311,13 @@ static void dio_bio_end_aio(struct bio *bio, int error)
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (remaining == 0) {
+		if (dio->result && dio->defer_completion) {
+			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
+			queue_work(dio->inode->i_sb->s_dio_done_wq,
+				   &dio->complete_work);
+		} else {
 			dio_complete(dio, dio->iocb->ki_pos, 0, true);
-		kmem_cache_free(dio_cache, dio);
+		}
 	}
 }
 
@@ -510,6 +536,41 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
 	return ret;
 }
 
+/*
+ * Create workqueue for deferred direct IO completions. We allocate the
+ * workqueue when it's first needed. This avoids creating workqueue for
+ * filesystems that don't need it and also allows us to create the workqueue
+ * late enough so the we can include s_id in the name of the workqueue.
+ */
+static int sb_init_dio_done_wq(struct super_block *sb)
+{
+	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
+						      WQ_MEM_RECLAIM, 0,
+						      sb->s_id);
+	if (!wq)
+		return -ENOMEM;
+	/*
+	 * This has to be atomic as more DIOs can race to create the workqueue
+	 */
+	cmpxchg(&sb->s_dio_done_wq, NULL, wq);
+	/* Someone created workqueue before us? Free ours... */
+	if (wq != sb->s_dio_done_wq)
+		destroy_workqueue(wq);
+	return 0;
+}
+
+static int dio_set_defer_completion(struct dio *dio)
+{
+	struct super_block *sb = dio->inode->i_sb;
+
+	if (dio->defer_completion)
+		return 0;
+	dio->defer_completion = true;
+	if (!sb->s_dio_done_wq)
+		return sb_init_dio_done_wq(sb);
+	return 0;
+}
+
 /*
 * Call into the fs to map some more disk blocks. We record the current number
 * of available blocks at sdio->blocks_available. These are in units of the
@@ -581,6 +642,9 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 
 		/* Store for completion */
 		dio->private = map_bh->b_private;
+
+		if (ret == 0 && buffer_defer_completion(map_bh))
+			ret = dio_set_defer_completion(dio);
 	}
 	return ret;
 }
@@ -1128,11 +1192,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 		}
 	}
 
-	/*
-	 * Will be decremented at I/O completion time.
-	 */
-	atomic_inc(&inode->i_dio_count);
-
 	/*
 	 * For file extending writes updating i_size before data
 	 * writeouts complete can expose uninitialized blocks. So
@@ -1141,11 +1200,33 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 */
 	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
 		(end > i_size_read(inode)));
 
-	retval = 0;
 
 	dio->inode = inode;
 	dio->rw = rw;
 
+	/*
+	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
+	 * so that we can call ->fsync.
+	 */
+	if (dio->is_async && (rw & WRITE) &&
+	    ((iocb->ki_filp->f_flags & O_DSYNC) ||
+	     IS_SYNC(iocb->ki_filp->f_mapping->host))) {
+		retval = dio_set_defer_completion(dio);
+		if (retval) {
+			/*
+			 * We grab i_mutex only for reads so we don't have
+			 * to release it here
+			 */
+			kmem_cache_free(dio_cache, dio);
+			goto out;
+		}
+	}
+
+	/*
+	 * Will be decremented at I/O completion time.
+	 */
+	atomic_inc(&inode->i_dio_count);
+
+	retval = 0;
 	sdio.blkbits = blkbits;
 	sdio.blkfactor = i_blkbits - blkbits;
 	sdio.block_in_file = offset >> blkbits;
@@ -1269,7 +1350,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
 	if (drop_refcount(dio) == 0) {
 		retval = dio_complete(dio, offset, retval, false);
-		kmem_cache_free(dio_cache, dio);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
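The sb_init_dio_done_wq() helper added above creates the per-superblock completion workqueue lazily and resolves races between concurrent DIOs with a single cmpxchg(): everyone may allocate, but only the first store is published and the losers free their copy. Below is a standalone sketch of the same publish-once idiom using C11 atomics instead of the kernel's cmpxchg(); the names and the "resource" are illustrative and not taken from the patch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the lazily created resource (e.g. a workqueue). */
struct resource { int id; };

static _Atomic(struct resource *) shared_res;

/* Create the resource on first use; concurrent callers may race, but only
 * one allocation is published and the rest are freed, mirroring the idea
 * behind sb_init_dio_done_wq(). */
static struct resource *get_resource(void)
{
	struct resource *res = atomic_load(&shared_res);
	struct resource *expected = NULL;

	if (res)
		return res;

	res = malloc(sizeof(*res));
	if (!res)
		return NULL;
	res->id = 42;

	/* Publish only if nobody beat us to it. */
	if (!atomic_compare_exchange_strong(&shared_res, &expected, res)) {
		free(res);		/* somebody else won the race */
		res = expected;		/* use the published copy */
	}
	return res;
}

int main(void)
{
	struct resource *r = get_resource();

	printf("resource id = %d\n", r ? r->id : -1);
	return 0;
}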
@@ -1792,7 +1792,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 {
 	int error;
 	int did_lock_epmutex = 0;
-	struct file *file, *tfile;
+	struct fd f, tf;
 	struct eventpoll *ep;
 	struct epitem *epi;
 	struct epoll_event epds;
@@ -1802,20 +1802,19 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
 		goto error_return;
 
-	/* Get the "struct file *" for the eventpoll file */
 	error = -EBADF;
-	file = fget(epfd);
-	if (!file)
+	f = fdget(epfd);
+	if (!f.file)
 		goto error_return;
 
 	/* Get the "struct file *" for the target file */
-	tfile = fget(fd);
-	if (!tfile)
+	tf = fdget(fd);
+	if (!tf.file)
 		goto error_fput;
 
 	/* The target file descriptor must support poll */
 	error = -EPERM;
-	if (!tfile->f_op || !tfile->f_op->poll)
+	if (!tf.file->f_op || !tf.file->f_op->poll)
 		goto error_tgt_fput;
 
 	/* Check if EPOLLWAKEUP is allowed */
@@ -1828,14 +1827,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	 * adding an epoll file descriptor inside itself.
 	 */
 	error = -EINVAL;
-	if (file == tfile || !is_file_epoll(file))
+	if (f.file == tf.file || !is_file_epoll(f.file))
 		goto error_tgt_fput;
 
 	/*
 	 * At this point it is safe to assume that the "private_data" contains
 	 * our own data structure.
 	 */
-	ep = file->private_data;
+	ep = f.file->private_data;
 
 	/*
 	 * When we insert an epoll file descriptor, inside another epoll file
@@ -1854,14 +1853,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		did_lock_epmutex = 1;
 	}
 	if (op == EPOLL_CTL_ADD) {
-		if (is_file_epoll(tfile)) {
+		if (is_file_epoll(tf.file)) {
 			error = -ELOOP;
-			if (ep_loop_check(ep, tfile) != 0) {
+			if (ep_loop_check(ep, tf.file) != 0) {
 				clear_tfile_check_list();
 				goto error_tgt_fput;
 			}
 		} else
-			list_add(&tfile->f_tfile_llink, &tfile_check_list);
+			list_add(&tf.file->f_tfile_llink, &tfile_check_list);
 	}
 
 	mutex_lock_nested(&ep->mtx, 0);
@@ -1871,14 +1870,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	 * above, we can be sure to be able to use the item looked up by
 	 * ep_find() till we release the mutex.
 	 */
-	epi = ep_find(ep, tfile, fd);
+	epi = ep_find(ep, tf.file, fd);
 
 	error = -EINVAL;
 	switch (op) {
 	case EPOLL_CTL_ADD:
 		if (!epi) {
 			epds.events |= POLLERR | POLLHUP;
-			error = ep_insert(ep, &epds, tfile, fd);
+			error = ep_insert(ep, &epds, tf.file, fd);
 		} else
 			error = -EEXIST;
 		clear_tfile_check_list();
@@ -1903,9 +1902,9 @@ error_tgt_fput:
 	if (did_lock_epmutex)
 		mutex_unlock(&epmutex);
 
-	fput(tfile);
+	fdput(tf);
 error_fput:
-	fput(file);
+	fdput(f);
 error_return:
 
 	return error;
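The epoll_ctl() hunks above only change how the syscall looks up its two file descriptors internally (fget()/fput() becomes the cheaper fdget()/fdput() pair); nothing changes for callers. For reference, a minimal, illustrative user-space caller that exercises this syscall might look like the following (the watched descriptor and event mask are arbitrary).

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = 0 };
	struct epoll_event ready[8];
	int epfd = epoll_create1(0);
	int n;

	if (epfd < 0)
		return 1;

	/* Watch stdin for readability; this call lands in the
	 * SYSCALL_DEFINE4(epoll_ctl, ...) shown above. */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev) < 0)
		return 1;

	n = epoll_wait(epfd, ready, 8, 1000);
	printf("%d descriptor(s) ready\n", n);

	close(epfd);
	return 0;
}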
@@ -180,7 +180,6 @@ struct ext4_map_blocks {
 * Flags for ext4_io_end->flags
 */
 #define	EXT4_IO_END_UNWRITTEN	0x0001
-#define EXT4_IO_END_DIRECT	0x0002
 
 /*
 * For converting uninitialized extents on a work queue. 'handle' is used for
@@ -196,8 +195,6 @@ typedef struct ext4_io_end {
 	unsigned int	flag;		/* unwritten or not */
 	loff_t		offset;		/* offset in the file */
 	ssize_t		size;		/* size of the extent */
-	struct kiocb	*iocb;		/* iocb struct for AIO */
-	int		result;		/* error value for AIO */
 	atomic_t	count;		/* reference counter */
 } ext4_io_end_t;
 
@@ -914,11 +911,9 @@ struct ext4_inode_info {
 	 * Completed IOs that need unwritten extents handling and don't have
 	 * transaction reserved
 	 */
-	struct list_head i_unrsv_conversion_list;
 	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	atomic_t i_unwritten;	/* Nr. of inflight conversions pending */
 	struct work_struct i_rsv_conversion_work;
-	struct work_struct i_unrsv_conversion_work;
 
 	spinlock_t i_block_reservation_lock;
 
@@ -1290,8 +1285,6 @@ struct ext4_sb_info {
 	struct flex_groups *s_flex_groups;
 	ext4_group_t s_flex_groups_allocated;
 
-	/* workqueue for unreserved extent convertions (dio) */
-	struct workqueue_struct *unrsv_conversion_wq;
 	/* workqueue for reserved extent conversions (buffered io) */
 	struct workqueue_struct *rsv_conversion_wq;
 
@@ -1354,9 +1347,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
 					      struct ext4_io_end *io_end)
 {
 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-		/* Writeback has to have coversion transaction reserved */
-		WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle &&
-			!(io_end->flag & EXT4_IO_END_DIRECT));
 		io_end->flag |= EXT4_IO_END_UNWRITTEN;
 		atomic_inc(&EXT4_I(inode)->i_unwritten);
 	}
@@ -2760,7 +2750,6 @@ extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
 extern void ext4_io_submit_init(struct ext4_io_submit *io,
 				struct writeback_control *wbc);
 extern void ext4_end_io_rsv_work(struct work_struct *work);
-extern void ext4_end_io_unrsv_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
 			       struct page *page,
@@ -149,7 +149,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 		ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
 		mutex_unlock(&inode->i_mutex);
 
-	if (ret > 0 || ret == -EIOCBQUEUED) {
+	if (ret > 0) {
 		ssize_t err;
 
 		err = generic_write_sync(file, pos, ret);
@@ -727,8 +727,12 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 
 	ret = ext4_map_blocks(handle, inode, &map, flags);
 	if (ret > 0) {
+		ext4_io_end_t *io_end = ext4_inode_aio(inode);
+
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
+			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
 	}
@@ -3024,19 +3028,13 @@ static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
 }
 
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
-			    ssize_t size, void *private, int ret,
-			    bool is_async)
+			    ssize_t size, void *private)
 {
-	struct inode *inode = file_inode(iocb->ki_filp);
 	ext4_io_end_t *io_end = iocb->private;
 
 	/* if not async direct IO just return */
-	if (!io_end) {
-		inode_dio_done(inode);
-		if (is_async)
-			aio_complete(iocb, ret, 0);
+	if (!io_end)
 		return;
-	}
 
 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3046,11 +3044,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 	iocb->private = NULL;
 	io_end->offset = offset;
 	io_end->size = size;
-	if (is_async) {
-		io_end->iocb = iocb;
-		io_end->result = ret;
-	}
-	ext4_put_io_end_defer(io_end);
+	ext4_put_io_end(io_end);
 }
 
 /*
@@ -3135,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 			ret = -ENOMEM;
 			goto retake_lock;
 		}
-		io_end->flag |= EXT4_IO_END_DIRECT;
 		/*
 		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
 		 */
@@ -3180,13 +3173,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 	if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
 		WARN_ON(iocb->private != io_end);
 		WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-		WARN_ON(io_end->iocb);
-		/*
-		 * Generic code already did inode_dio_done() so we
-		 * have to clear EXT4_IO_END_DIRECT to not do it for
-		 * the second time.
-		 */
-		io_end->flag = 0;
 		ext4_put_io_end(io_end);
 		iocb->private = NULL;
 	}
@@ -123,10 +123,6 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
 		ext4_finish_bio(bio);
 		bio_put(bio);
 	}
-	if (io_end->flag & EXT4_IO_END_DIRECT)
-		inode_dio_done(io_end->inode);
-	if (io_end->iocb)
-		aio_complete(io_end->iocb, io_end->result, 0);
 	kmem_cache_free(io_end_cachep, io_end);
 }
 
@@ -204,19 +200,14 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
 	struct workqueue_struct *wq;
 	unsigned long flags;
 
-	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+	/* Only reserved conversions from writeback should enter here */
+	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+	WARN_ON(!io_end->handle);
 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-	if (io_end->handle) {
 	wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
 	if (list_empty(&ei->i_rsv_conversion_list))
 		queue_work(wq, &ei->i_rsv_conversion_work);
 	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
-	} else {
-		wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
-		if (list_empty(&ei->i_unrsv_conversion_list))
-			queue_work(wq, &ei->i_unrsv_conversion_work);
-		list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
-	}
 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 }
 
@@ -256,13 +247,6 @@ void ext4_end_io_rsv_work(struct work_struct *work)
 	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
 }
 
-void ext4_end_io_unrsv_work(struct work_struct *work)
-{
-	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
-						  i_unrsv_conversion_work);
-	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
-}
-
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 {
 	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
@@ -762,9 +762,7 @@ static void ext4_put_super(struct super_block *sb)
 	ext4_unregister_li_request(sb);
 	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
-	flush_workqueue(sbi->unrsv_conversion_wq);
 	flush_workqueue(sbi->rsv_conversion_wq);
-	destroy_workqueue(sbi->unrsv_conversion_wq);
 	destroy_workqueue(sbi->rsv_conversion_wq);
 
 	if (sbi->s_journal) {
@@ -875,14 +873,12 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 #endif
 	ei->jinode = NULL;
 	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
-	INIT_LIST_HEAD(&ei->i_unrsv_conversion_list);
 	spin_lock_init(&ei->i_completed_io_lock);
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
 	atomic_set(&ei->i_ioend_count, 0);
 	atomic_set(&ei->i_unwritten, 0);
 	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
-	INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work);
 
 	return &ei->vfs_inode;
 }
@@ -3995,14 +3991,6 @@ no_journal:
 		goto failed_mount4;
 	}
 
-	EXT4_SB(sb)->unrsv_conversion_wq =
-		alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
-	if (!EXT4_SB(sb)->unrsv_conversion_wq) {
-		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
-		ret = -ENOMEM;
-		goto failed_mount4;
-	}
-
 	/*
 	 * The jbd2_journal_load will have done any necessary log recovery,
 	 * so we can safely mount the rest of the filesystem now.
@@ -4156,8 +4144,6 @@ failed_mount4:
 	ext4_msg(sb, KERN_ERR, "mount failed");
 	if (EXT4_SB(sb)->rsv_conversion_wq)
 		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
-	if (EXT4_SB(sb)->unrsv_conversion_wq)
-		destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
 failed_mount_wq:
 	if (sbi->s_journal) {
 		jbd2_journal_destroy(sbi->s_journal);
@@ -4605,7 +4591,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 
 	trace_ext4_sync_fs(sb, wait);
 	flush_workqueue(sbi->rsv_conversion_wq);
-	flush_workqueue(sbi->unrsv_conversion_wq);
 	/*
 	 * Writeback quota in non-journalled quota case - journalled quota has
 	 * no dirty dquots
@@ -4641,7 +4626,6 @@ static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
 
 	trace_ext4_sync_fs(sb, wait);
 	flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
-	flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
 	dquot_writeback_dquots(sb, -1);
 	if (wait && test_opt(sb, BARRIER))
 		ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
@@ -385,6 +385,10 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
 */
 void file_sb_list_add(struct file *file, struct super_block *sb)
 {
+	if (likely(!(file->f_mode & FMODE_WRITE)))
+		return;
+	if (!S_ISREG(file_inode(file)->i_mode))
+		return;
 	lg_local_lock(&files_lglock);
 	__file_sb_list_add(file, sb);
 	lg_local_unlock(&files_lglock);
@@ -450,8 +454,6 @@ void mark_files_ro(struct super_block *sb)
 
 	lg_global_lock(&files_lglock);
 	do_file_list_for_each_entry(sb, f) {
-		if (!S_ISREG(file_inode(f)->i_mode))
-			continue;
 		if (!file_count(f))
 			continue;
 		if (!(f->f_mode & FMODE_WRITE))
@@ -1525,7 +1525,7 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
-void touch_atime(struct path *path)
+void touch_atime(const struct path *path)
 {
 	struct vfsmount *mnt = path->mnt;
 	struct inode *inode = path->dentry->d_inode;
fs/namei.c (182 changed lines)
@@ -2222,6 +2222,188 @@ user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
 	return s;
 }
 
+/**
+ * umount_lookup_last - look up last component for umount
+ * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
+ * @path: pointer to container for result
+ *
+ * This is a special lookup_last function just for umount. In this case, we
+ * need to resolve the path without doing any revalidation.
+ *
+ * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
+ * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
+ * in almost all cases, this lookup will be served out of the dcache. The only
+ * cases where it won't are if nd->last refers to a symlink or the path is
+ * bogus and it doesn't exist.
+ *
+ * Returns:
+ * -error: if there was an error during lookup. This includes -ENOENT if the
+ *         lookup found a negative dentry. The nd->path reference will also be
+ *         put in this case.
+ *
+ * 0:      if we successfully resolved nd->path and found it to not to be a
+ *         symlink that needs to be followed. "path" will also be populated.
+ *         The nd->path reference will also be put.
+ *
+ * 1:      if we successfully resolved nd->last and found it to be a symlink
+ *         that needs to be followed. "path" will be populated with the path
+ *         to the link, and nd->path will *not* be put.
+ */
+static int
+umount_lookup_last(struct nameidata *nd, struct path *path)
+{
+	int error = 0;
+	struct dentry *dentry;
+	struct dentry *dir = nd->path.dentry;
+
+	if (unlikely(nd->flags & LOOKUP_RCU)) {
+		WARN_ON_ONCE(1);
+		error = -ECHILD;
+		goto error_check;
+	}
+
+	nd->flags &= ~LOOKUP_PARENT;
+
+	if (unlikely(nd->last_type != LAST_NORM)) {
+		error = handle_dots(nd, nd->last_type);
+		if (!error)
+			dentry = dget(nd->path.dentry);
+		goto error_check;
+	}
+
+	mutex_lock(&dir->d_inode->i_mutex);
+	dentry = d_lookup(dir, &nd->last);
+	if (!dentry) {
+		/*
+		 * No cached dentry. Mounted dentries are pinned in the cache,
+		 * so that means that this dentry is probably a symlink or the
+		 * path doesn't actually point to a mounted dentry.
+		 */
+		dentry = d_alloc(dir, &nd->last);
+		if (!dentry) {
+			error = -ENOMEM;
+		} else {
+			dentry = lookup_real(dir->d_inode, dentry, nd->flags);
+			if (IS_ERR(dentry))
+				error = PTR_ERR(dentry);
+		}
+	}
+	mutex_unlock(&dir->d_inode->i_mutex);
+
+error_check:
+	if (!error) {
+		if (!dentry->d_inode) {
+			error = -ENOENT;
+			dput(dentry);
+		} else {
+			path->dentry = dentry;
+			path->mnt = mntget(nd->path.mnt);
+			if (should_follow_link(dentry->d_inode,
+						nd->flags & LOOKUP_FOLLOW))
+				return 1;
+			follow_mount(path);
+		}
+	}
+	terminate_walk(nd);
+	return error;
+}
+
+/**
+ * path_umountat - look up a path to be umounted
+ * @dfd:	directory file descriptor to start walk from
+ * @name:	full pathname to walk
+ * @flags:	lookup flags
+ * @nd:		pathwalk nameidata
+ *
+ * Look up the given name, but don't attempt to revalidate the last component.
+ * Returns 0 and "path" will be valid on success; Retuns error otherwise.
+ */
+static int
+path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
+{
+	struct file *base = NULL;
+	struct nameidata nd;
+	int err;
+
+	err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
+	if (unlikely(err))
+		return err;
+
+	current->total_link_count = 0;
+	err = link_path_walk(name, &nd);
+	if (err)
+		goto out;
+
+	/* If we're in rcuwalk, drop out of it to handle last component */
+	if (nd.flags & LOOKUP_RCU) {
+		err = unlazy_walk(&nd, NULL);
+		if (err) {
+			terminate_walk(&nd);
+			goto out;
+		}
+	}
+
+	err = umount_lookup_last(&nd, path);
+	while (err > 0) {
+		void *cookie;
+		struct path link = *path;
+		err = may_follow_link(&link, &nd);
+		if (unlikely(err))
+			break;
+		nd.flags |= LOOKUP_PARENT;
+		err = follow_link(&link, &nd, &cookie);
+		if (err)
+			break;
+		err = umount_lookup_last(&nd, path);
+		put_link(&nd, &link, cookie);
+	}
+out:
+	if (base)
+		fput(base);
+
+	if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
+		path_put(&nd.root);
+
+	return err;
+}
+
+/**
+ * user_path_umountat - lookup a path from userland in order to umount it
+ * @dfd:	directory file descriptor
+ * @name:	pathname from userland
+ * @flags:	lookup flags
+ * @path:	pointer to container to hold result
+ *
+ * A umount is a special case for path walking. We're not actually interested
+ * in the inode in this situation, and ESTALE errors can be a problem. We
+ * simply want track down the dentry and vfsmount attached at the mountpoint
+ * and avoid revalidating the last component.
+ *
+ * Returns 0 and populates "path" on success.
+ */
+int
+user_path_umountat(int dfd, const char __user *name, unsigned int flags,
+		   struct path *path)
+{
+	struct filename *s = getname(name);
+	int error;
+
+	if (IS_ERR(s))
+		return PTR_ERR(s);
+
+	error = path_umountat(dfd, s->name, path, flags | LOOKUP_RCU);
+	if (unlikely(error == -ECHILD))
+		error = path_umountat(dfd, s->name, path, flags);
+	if (unlikely(error == -ESTALE))
+		error = path_umountat(dfd, s->name, path, flags | LOOKUP_REVAL);
+
+	if (likely(!error))
+		audit_inode(s, path->dentry, 0);
+
+	putname(s);
+	return error;
+}
+
 /*
 * It's inline, so penalty for filesystems that don't use sticky bit is
 * minimal.
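The new user_path_umountat() lookup is wired up to the umount syscall in the next hunk, so that unmounting no longer revalidates (and so cannot spuriously fail on) the final path component. For reference only, a minimal user-space caller that ends up on this path might look as follows; the mount point name is arbitrary and the snippet is not part of the commit.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	/* UMOUNT_NOFOLLOW keeps the kernel from following a symlink as the
	 * last component; either way the lookup now goes through
	 * user_path_umountat() instead of user_path_at(). */
	if (umount2("/mnt/test", UMOUNT_NOFOLLOW) < 0) {
		fprintf(stderr, "umount2: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}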
@@ -1318,7 +1318,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 	if (!(flags & UMOUNT_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
 
-	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
+	retval = user_path_umountat(AT_FDCWD, name, lookup_flags, &path);
 	if (retval)
 		goto out;
 	mnt = real_mount(path.mnt);
@@ -1816,10 +1816,7 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
 static __be32 nfsd4_encode_path(const struct path *root,
 		const struct path *path, __be32 **pp, int *buflen)
 {
-	struct path cur = {
-		.mnt = path->mnt,
-		.dentry = path->dentry,
-	};
+	struct path cur = *path;
 	__be32 *p = *pp;
 	struct dentry **components = NULL;
 	unsigned int ncomponents = 0;
@@ -1859,14 +1856,19 @@ static __be32 nfsd4_encode_path(const struct path *root,
 
 	while (ncomponents) {
 		struct dentry *dentry = components[ncomponents - 1];
-		unsigned int len = dentry->d_name.len;
+		unsigned int len;
 
+		spin_lock(&dentry->d_lock);
+		len = dentry->d_name.len;
 		*buflen -= 4 + (XDR_QUADLEN(len) << 2);
-		if (*buflen < 0)
+		if (*buflen < 0) {
+			spin_unlock(&dentry->d_lock);
 			goto out_free;
+		}
 		WRITE32(len);
 		WRITEMEM(dentry->d_name.name, len);
 		dprintk("/%s", dentry->d_name.name);
+		spin_unlock(&dentry->d_lock);
 		dput(dentry);
 		ncomponents--;
 	}
@@ -994,23 +994,16 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
 	return ret;
 }
 
-static int nilfs_tree_was_touched(struct dentry *root_dentry)
-{
-	return d_count(root_dentry) > 1;
-}
-
 /**
- * nilfs_try_to_shrink_tree() - try to shrink dentries of a checkpoint
+ * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
 * @root_dentry: root dentry of the tree to be shrunk
 *
 * This function returns true if the tree was in-use.
 */
-static int nilfs_try_to_shrink_tree(struct dentry *root_dentry)
+static bool nilfs_tree_is_busy(struct dentry *root_dentry)
 {
-	if (have_submounts(root_dentry))
-		return true;
 	shrink_dcache_parent(root_dentry);
-	return nilfs_tree_was_touched(root_dentry);
+	return d_count(root_dentry) > 1;
 }
 
 int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
@@ -1034,8 +1027,7 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
 	if (inode) {
 		dentry = d_find_alias(inode);
 		if (dentry) {
-			if (nilfs_tree_was_touched(dentry))
-				ret = nilfs_try_to_shrink_tree(dentry);
+			ret = nilfs_tree_is_busy(dentry);
 			dput(dentry);
 		}
 		iput(inode);
@@ -1331,11 +1323,8 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
 
 		s->s_flags |= MS_ACTIVE;
 	} else if (!sd.cno) {
-		int busy = false;
-
-		if (nilfs_tree_was_touched(s->s_root)) {
-			busy = nilfs_try_to_shrink_tree(s->s_root);
-			if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
+		if (nilfs_tree_is_busy(s->s_root)) {
+			if ((flags ^ s->s_flags) & MS_RDONLY) {
 				printk(KERN_ERR "NILFS: the device already "
 				       "has a %s mount.\n",
 				       (s->s_flags & MS_RDONLY) ?
@@ -1343,8 +1332,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
 				err = -EBUSY;
 				goto failed_super;
 			}
-		}
-		if (!busy) {
+		} else {
 			/*
 			 * Try remount to setup mount states if the current
 			 * tree is not mounted and only snapshots use this sb.
@@ -565,9 +565,7 @@ bail:
 static void ocfs2_dio_end_io(struct kiocb *iocb,
 			     loff_t offset,
 			     ssize_t bytes,
-			     void *private,
-			     int ret,
-			     bool is_async)
+			     void *private)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	int level;

@@ -592,10 +590,6 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 
 	level = ocfs2_iocb_rw_locked_level(iocb);
 	ocfs2_rw_unlock(inode, level);
-
-	inode_dio_done(inode);
-	if (is_async)
-		aio_complete(iocb, ret, 0);
 }
 
 /*
fs/open.c (11)

@@ -485,14 +485,13 @@ out_unlock:
 
 SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
 {
-	struct file * file;
+	struct fd f = fdget(fd);
 	int err = -EBADF;
 
-	file = fget(fd);
-	if (file) {
-		audit_inode(NULL, file->f_path.dentry, 0);
-		err = chmod_common(&file->f_path, mode);
-		fput(file);
+	if (f.file) {
+		audit_inode(NULL, f.file->f_path.dentry, 0);
+		err = chmod_common(&f.file->f_path, mode);
+		fdput(f);
 	}
 	return err;
 }
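Not part of the commit itself: a minimal sketch of the fget()-to-fdget() pattern that the fchmod(), epoll_ctl(), copy_module_from_fd() and kvm eventfd conversions in this pull share. The helper name with_fd() and the do_something() call are placeholders for illustration; the sketch assumes kernel context with <linux/file.h> available.

/*
 * Illustration only: the shape of an fdget()/fdput() caller.
 * do_something() stands in for whatever the real caller does with f.file.
 */
static int with_fd(unsigned int fd)
{
	struct fd f = fdget(fd);	/* may avoid the refcount bump fget() always takes */
	int err = -EBADF;

	if (f.file) {
		err = do_something(f.file);	/* placeholder */
		fdput(f);			/* pairs with fdget() */
	}
	return err;
}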
fs/super.c (18)

@@ -152,15 +152,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	static const struct super_operations default_op;
 
 	if (s) {
-		if (security_sb_alloc(s)) {
-			/*
-			 * We cannot call security_sb_free() without
-			 * security_sb_alloc() succeeding. So bail out manually
-			 */
-			kfree(s);
-			s = NULL;
-			goto out;
-		}
+		if (security_sb_alloc(s))
+			goto out_free_sb;
 #ifdef CONFIG_SMP
 		s->s_files = alloc_percpu(struct list_head);
 		if (!s->s_files)

@@ -228,6 +222,7 @@ err_out:
 	free_percpu(s->s_files);
 #endif
 	destroy_sb_writers(s);
+out_free_sb:
 	kfree(s);
 	s = NULL;
 	goto out;

@@ -414,6 +409,11 @@ void generic_shutdown_super(struct super_block *sb)
 
 		evict_inodes(sb);
 
+		if (sb->s_dio_done_wq) {
+			destroy_workqueue(sb->s_dio_done_wq);
+			sb->s_dio_done_wq = NULL;
+		}
+
 		if (sop->put_super)
 			sop->put_super(sb);
 
@@ -86,14 +86,6 @@ xfs_destroy_ioend(
 		bh->b_end_io(bh, !ioend->io_error);
 	}
 
-	if (ioend->io_iocb) {
-		inode_dio_done(ioend->io_inode);
-		if (ioend->io_isasync) {
-			aio_complete(ioend->io_iocb, ioend->io_error ?
-					ioend->io_error : ioend->io_result, 0);
-		}
-	}
-
 	mempool_free(ioend, xfs_ioend_pool);
 }
 

@@ -281,7 +273,6 @@ xfs_alloc_ioend(
 	 * all the I/O from calling the completion routine too early.
 	 */
 	atomic_set(&ioend->io_remaining, 1);
-	ioend->io_isasync = 0;
 	ioend->io_isdirect = 0;
 	ioend->io_error = 0;
 	ioend->io_list = NULL;

@@ -291,8 +282,6 @@ xfs_alloc_ioend(
 	ioend->io_buffer_tail = NULL;
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
-	ioend->io_iocb = NULL;
-	ioend->io_result = 0;
 	ioend->io_append_trans = NULL;
 
 	INIT_WORK(&ioend->io_work, xfs_end_io);

@@ -1292,8 +1281,10 @@ __xfs_get_blocks(
 		if (create || !ISUNWRITTEN(&imap))
 			xfs_map_buffer(inode, bh_result, &imap, offset);
 		if (create && ISUNWRITTEN(&imap)) {
-			if (direct)
+			if (direct) {
 				bh_result->b_private = inode;
+				set_buffer_defer_completion(bh_result);
+			}
 			set_buffer_unwritten(bh_result);
 		}
 	}

@@ -1390,9 +1381,7 @@ xfs_end_io_direct_write(
 	struct kiocb		*iocb,
 	loff_t			offset,
 	ssize_t			size,
-	void			*private,
-	int			ret,
-	bool			is_async)
+	void			*private)
 {
 	struct xfs_ioend	*ioend = iocb->private;
 

@@ -1414,17 +1403,10 @@ xfs_end_io_direct_write(
 
 	ioend->io_offset = offset;
 	ioend->io_size = size;
-	ioend->io_iocb = iocb;
-	ioend->io_result = ret;
 	if (private && size > 0)
 		ioend->io_type = XFS_IO_UNWRITTEN;
 
-	if (is_async) {
-		ioend->io_isasync = 1;
-		xfs_finish_ioend(ioend);
-	} else {
-		xfs_finish_ioend_sync(ioend);
-	}
+	xfs_finish_ioend_sync(ioend);
 }
 
 STATIC ssize_t
@@ -45,7 +45,6 @@ typedef struct xfs_ioend {
 	unsigned int		io_type;	/* delalloc / unwritten */
 	int			io_error;	/* I/O error code */
 	atomic_t		io_remaining;	/* hold count */
-	unsigned int		io_isasync : 1;	/* needs aio_complete */
 	unsigned int		io_isdirect : 1;/* direct I/O */
 	struct inode		*io_inode;	/* file being written to */
 	struct buffer_head	*io_buffer_head;/* buffer linked list head */

@@ -54,8 +53,6 @@ typedef struct xfs_ioend {
 	xfs_off_t		io_offset;	/* offset in the file */
 	struct work_struct	io_work;	/* xfsdatad work queue */
 	struct xfs_trans	*io_append_trans;/* xact. for size update */
-	struct kiocb		*io_iocb;
-	int			io_result;
 } xfs_ioend_t;
 
 extern const struct address_space_operations xfs_address_space_operations;
@@ -36,6 +36,7 @@ enum bh_state_bits {
 	BH_Quiet,	/* Buffer Error Prinks to be quiet */
 	BH_Meta,	/* Buffer contains metadata */
 	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
+	BH_Defer_Completion, /* Defer AIO completion to workqueue */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities

@@ -128,6 +129,7 @@ BUFFER_FNS(Write_EIO, write_io_error)
 BUFFER_FNS(Unwritten, unwritten)
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
+BUFFER_FNS(Defer_Completion, defer_completion)
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
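For context (a rough expansion, not part of the diff): BUFFER_FNS() in buffer_head.h generates the usual set/clear/test helpers for a state bit, which is why the one-line addition above is enough for __xfs_get_blocks() to call set_buffer_defer_completion(). Approximately:

/* Rough expansion of BUFFER_FNS(Defer_Completion, defer_completion);
 * the real macro lives in include/linux/buffer_head.h. */
static inline void set_buffer_defer_completion(struct buffer_head *bh)
{
	set_bit(BH_Defer_Completion, &bh->b_state);
}

static inline void clear_buffer_defer_completion(struct buffer_head *bh)
{
	clear_bit(BH_Defer_Completion, &bh->b_state);
}

static inline int buffer_defer_completion(const struct buffer_head *bh)
{
	return test_bit(BH_Defer_Completion, &bh->b_state);
}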
@@ -46,6 +46,7 @@ struct vfsmount;
 struct cred;
 struct swap_info_struct;
 struct seq_file;
+struct workqueue_struct;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);

@@ -63,8 +64,7 @@ struct buffer_head;
 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create);
 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
-			ssize_t bytes, void *private, int ret,
-			bool is_async);
+			ssize_t bytes, void *private);
 
 #define MAY_EXEC		0x00000001
 #define MAY_WRITE		0x00000002

@@ -1328,6 +1328,9 @@ struct super_block {
 
 	/* Being remounted read-only */
 	int s_readonly_remount;
+
+	/* AIO completions deferred from interrupt context */
+	struct workqueue_struct *s_dio_done_wq;
 };
 
 /* superblock cache pruning functions */

@@ -1804,7 +1807,7 @@ enum file_time_flags {
 	S_VERSION = 8,
 };
 
-extern void touch_atime(struct path *);
+extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
 	if (!(file->f_flags & O_NOATIME))
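A hedged sketch of what a filesystem's direct-I/O completion callback looks like under the new dio_iodone_t signature above: the old int ret / bool is_async pair is gone because generic direct-io code now completes the AIO itself, deferring to the per-superblock s_dio_done_wq workqueue when needed. The function name and body below are illustrative only, not taken from any filesystem in this merge.

/* Illustrative only: a dio_iodone_t implementation under the new signature.
 * Real filesystems (see the ocfs2 and xfs hunks above) do their
 * size/unwritten-extent bookkeeping here and simply return. */
static void example_dio_end_io(struct kiocb *iocb, loff_t offset,
			       ssize_t bytes, void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* filesystem-specific completion work would go here */
	pr_debug("dio done on inode %lu: %zd bytes at %lld\n",
		 inode->i_ino, bytes, (long long)offset);
}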
@@ -58,6 +58,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 
 extern int user_path_at(int, const char __user *, unsigned, struct path *);
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
+extern int user_path_umountat(int, const char __user *, unsigned int, struct path *);
 
 #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
 #define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
@@ -42,7 +42,6 @@
 #define IBS_FETCH_CODE			13
 #define IBS_OP_CODE			14
 
-struct super_block;
 struct dentry;
 struct file_operations;
 struct pt_regs;

@@ -51,7 +50,7 @@ struct pt_regs;
 struct oprofile_operations {
 	/* create any necessary configuration files in the oprofile fs.
 	 * Optional. */
-	int (*create_files)(struct super_block * sb, struct dentry * root);
+	int (*create_files)(struct dentry * root);
 	/* Do any necessary interrupt setup. Optional. */
 	int (*setup)(void);
 	/* Do any necessary interrupt shutdown. Optional. */

@@ -125,27 +124,26 @@ void oprofile_add_trace(unsigned long eip);
  * Create a file of the given name as a child of the given root, with
  * the specified file operations.
  */
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_file(struct dentry * root,
 	char const * name, const struct file_operations * fops);
 
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_file_perm(struct dentry * root,
 	char const * name, const struct file_operations * fops, int perm);
 
 /** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ulong(struct dentry * root,
 	char const * name, ulong * val);
 
 /** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ro_ulong(struct dentry * root,
 	char const * name, ulong * val);
 
 /** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ro_atomic(struct dentry * root,
 	char const * name, atomic_t * val);
 
 /** create a directory */
-struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
-	char const * name);
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
 
 /**
  * Write the given asciz string to the given user buffer @buf, updating *offset
@@ -2549,21 +2549,20 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
 /* Sets info->hdr and info->len. */
 static int copy_module_from_fd(int fd, struct load_info *info)
 {
-	struct file *file;
+	struct fd f = fdget(fd);
 	int err;
 	struct kstat stat;
 	loff_t pos;
 	ssize_t bytes = 0;
 
-	file = fget(fd);
-	if (!file)
+	if (!f.file)
 		return -ENOEXEC;
 
-	err = security_kernel_module_from_file(file);
+	err = security_kernel_module_from_file(f.file);
 	if (err)
 		goto out;
 
-	err = vfs_getattr(&file->f_path, &stat);
+	err = vfs_getattr(&f.file->f_path, &stat);
 	if (err)
 		goto out;
 

@@ -2586,7 +2585,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
 
 	pos = 0;
 	while (pos < stat.size) {
-		bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
+		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
 				    stat.size - pos);
 		if (bytes < 0) {
 			vfree(info->hdr);

@@ -2600,7 +2599,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
 	info->len = pos;
 
 out:
-	fput(file);
+	fdput(f);
 	return err;
 }
 
@@ -26,6 +26,7 @@
 #include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
+#include <linux/dcache.h>
 #include <net/addrconf.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
@@ -532,6 +533,81 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
 	return buf;
 }
 
+static void widen(char *buf, char *end, unsigned len, unsigned spaces)
+{
+	size_t size;
+	if (buf >= end)	/* nowhere to put anything */
+		return;
+	size = end - buf;
+	if (size <= spaces) {
+		memset(buf, ' ', size);
+		return;
+	}
+	if (len) {
+		if (len > size - spaces)
+			len = size - spaces;
+		memmove(buf + spaces, buf, len);
+	}
+	memset(buf, ' ', spaces);
+}
+
+static noinline_for_stack
+char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
+		  const char *fmt)
+{
+	const char *array[4], *s;
+	const struct dentry *p;
+	int depth;
+	int i, n;
+
+	switch (fmt[1]) {
+		case '2': case '3': case '4':
+			depth = fmt[1] - '0';
+			break;
+		default:
+			depth = 1;
+	}
+
+	rcu_read_lock();
+	for (i = 0; i < depth; i++, d = p) {
+		p = ACCESS_ONCE(d->d_parent);
+		array[i] = ACCESS_ONCE(d->d_name.name);
+		if (p == d) {
+			if (i)
+				array[i] = "";
+			i++;
+			break;
+		}
+	}
+	s = array[--i];
+	for (n = 0; n != spec.precision; n++, buf++) {
+		char c = *s++;
+		if (!c) {
+			if (!i)
+				break;
+			c = '/';
+			s = array[--i];
+		}
+		if (buf < end)
+			*buf = c;
+	}
+	rcu_read_unlock();
+	if (n < spec.field_width) {
+		/* we want to pad the sucker */
+		unsigned spaces = spec.field_width - n;
+		if (!(spec.flags & LEFT)) {
+			widen(buf - n, end, n, spaces);
+			return buf + spaces;
+		}
+		while (spaces--) {
+			if (buf < end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	return buf;
+}
+
 static noinline_for_stack
 char *symbol_string(char *buf, char *end, void *ptr,
 		    struct printf_spec spec, const char *fmt)
@@ -1253,6 +1329,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		spec.base = 16;
 		return number(buf, end,
 			      (unsigned long long) *((phys_addr_t *)ptr), spec);
+	case 'd':
+		return dentry_name(buf, end, ptr, spec, fmt);
+	case 'D':
+		return dentry_name(buf, end,
+				   ((const struct file *)ptr)->f_path.dentry,
+				   spec, fmt);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
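A usage sketch (not from the commit): with the '%pd' and '%pD' cases wired into pointer() above, callers can hand printk a struct dentry * or a struct file * directly instead of dereferencing d_name.name themselves. The function report_open() is a made-up example.

/* Illustration only: how a caller might use the new format specifiers. */
static void report_open(struct file *filp)
{
	/* last path component of the file being written */
	pr_info("opened %pD\n", filp);
	/* up to the last two components of the dentry's path, e.g. "dir/name" */
	pr_info("parent/name: %pd2\n", filp->f_path.dentry);
}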
@@ -2550,7 +2550,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
 	mutex_unlock(&inode->i_mutex);
 
-	if (ret > 0 || ret == -EIOCBQUEUED) {
+	if (ret > 0) {
 		ssize_t err;
 
 		err = generic_write_sync(file, pos, ret);
@@ -2615,13 +2615,15 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 	 * tmpfs instance, limiting inodes to one per page of lowmem;
 	 * but the internal instance is left unlimited.
 	 */
-	if (!(sb->s_flags & MS_NOUSER)) {
+	if (!(sb->s_flags & MS_KERNMOUNT)) {
 		sbinfo->max_blocks = shmem_default_max_blocks();
 		sbinfo->max_inodes = shmem_default_max_inodes();
 		if (shmem_parse_options(data, sbinfo, false)) {
 			err = -EINVAL;
 			goto failed;
 		}
+	} else {
+		sb->s_flags |= MS_NOUSER;
 	}
 	sb->s_export_op = &shmem_export_ops;
 	sb->s_flags |= MS_NOSEC;

@@ -2831,8 +2833,7 @@ int __init shmem_init(void)
 		goto out2;
 	}
 
-	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
-				 shmem_fs_type.name, NULL);
+	shm_mnt = kern_mount(&shmem_fs_type);
 	if (IS_ERR(shm_mnt)) {
 		error = PTR_ERR(shm_mnt);
 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
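The reason the shmem change above holds together, roughly (an assumption-labelled sketch, not code from this merge): kern_mount() passes MS_KERNMOUNT down to vfs_kern_mount(), so shmem_fill_super() can recognise the internal tmpfs instance by that flag and set MS_NOUSER itself, instead of every internal caller having to pass MS_NOUSER.

/* Rough shape of what kern_mount() does; the helper name is made up
 * to avoid clashing with the real one in fs/namespace.c. */
static struct vfsmount *kern_mount_sketch(struct file_system_type *type)
{
	return vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
}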
@@ -291,7 +291,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
 	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
-	struct file *file = NULL;
+	struct fd f;
 	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
 	int ret;
 	unsigned int events;

@@ -306,13 +306,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	INIT_WORK(&irqfd->inject, irqfd_inject);
 	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-	file = eventfd_fget(args->fd);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
-		goto fail;
+	f = fdget(args->fd);
+	if (!f.file) {
+		ret = -EBADF;
+		goto out;
 	}
 
-	eventfd = eventfd_ctx_fileget(file);
+	eventfd = eventfd_ctx_fileget(f.file);
 	if (IS_ERR(eventfd)) {
 		ret = PTR_ERR(eventfd);
 		goto fail;

@@ -391,7 +391,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 				lockdep_is_held(&kvm->irqfds.lock));
 	irqfd_update(kvm, irqfd, irq_rt);
 
-	events = file->f_op->poll(file, &irqfd->pt);
+	events = f.file->f_op->poll(f.file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
 

@@ -408,7 +408,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP
 	 */
-	fput(file);
+	fdput(f);
 
 	return 0;
 

@@ -422,9 +422,9 @@ fail:
 	if (eventfd && !IS_ERR(eventfd))
 		eventfd_ctx_put(eventfd);
 
-	if (!IS_ERR(file))
-		fput(file);
+	fdput(f);
 
+out:
 	kfree(irqfd);
 	return ret;
 }