Diffstat (limited to 'include')
-rw-r--r--  include/cxl/features.h             5
-rw-r--r--  include/linux/blkdev.h             5
-rw-r--r--  include/linux/buffer_head.h        9
-rw-r--r--  include/linux/ceph/osd_client.h    6
-rw-r--r--  include/linux/cpufreq.h           83
-rw-r--r--  include/linux/dma-mapping.h       12
-rw-r--r--  include/linux/file_ref.h          19
-rw-r--r--  include/linux/fsnotify_backend.h  15
-rw-r--r--  include/linux/fwnode.h             5
-rw-r--r--  include/linux/iommu.h              8
-rw-r--r--  include/linux/module.h             2
-rw-r--r--  include/sound/soc_sdw_utils.h      1
-rw-r--r--  include/sound/ump_convert.h        2
-rw-r--r--  include/uapi/linux/landlock.h     87
-rw-r--r--  include/ufs/ufs_quirks.h           6
15 files changed, 157 insertions, 108 deletions
diff --git a/include/cxl/features.h b/include/cxl/features.h
index a3bb34694c06..5f7f842765a5 100644
--- a/include/cxl/features.h
+++ b/include/cxl/features.h
@@ -66,7 +66,7 @@ struct cxl_memdev;
#ifdef CONFIG_CXL_FEATURES
inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds);
int devm_cxl_setup_features(struct cxl_dev_state *cxlds);
-int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd);
+int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd);
#else
static inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
@@ -78,7 +78,8 @@ static inline int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
return -EOPNOTSUPP;
}
-static inline int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
+static inline int devm_cxl_setup_fwctl(struct device *host,
+ struct cxl_memdev *cxlmd)
{
return -EOPNOTSUPP;
}
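A minimal caller sketch for the updated signature (illustrative only; the probe function, variable names, and the choice of &pdev->dev as the fwctl host are assumptions, not taken from this diff):

/* Sketch: a PCI probe path passing an explicit host device to
 * devm_cxl_setup_fwctl().  Function and variable names are hypothetical. */
static int example_cxl_probe(struct pci_dev *pdev, struct cxl_dev_state *cxlds,
			     struct cxl_memdev *cxlmd)
{
	int rc;

	rc = devm_cxl_setup_features(cxlds);
	if (rc)
		dev_dbg(&pdev->dev, "no CXL features discovered\n");

	rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
	if (rc)
		dev_dbg(&pdev->dev, "no CXL fwctl setup\n");

	return 0;
}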
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2b8875a82ff8..9a1f0ee40b56 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1637,6 +1637,7 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);
int lookup_bdev(const char *pathname, dev_t *dev);
@@ -1693,10 +1694,6 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-/* just for blk-cgroup, don't use elsewhere */
-struct block_device *blkdev_get_no_open(dev_t dev);
-void blkdev_put_no_open(struct block_device *bdev);
-
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
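A hedged usage sketch for the new helper (the filesystem-side function name is hypothetical): validate a requested block size against the device's limits before committing to it.

/* Sketch only: check the requested block size first, then apply it. */
static int example_set_fs_block_size(struct file *bdev_file, int size)
{
	struct block_device *bdev = file_bdev(bdev_file);
	int ret;

	ret = bdev_validate_blocksize(bdev, size);
	if (ret)
		return ret;

	return set_blocksize(bdev_file, size);
}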
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f0a4ad7839b6..0029ff880e27 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -34,6 +34,7 @@ enum bh_state_bits {
BH_Meta, /* Buffer contains metadata */
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
BH_Defer_Completion, /* Defer AIO completion to workqueue */
+ BH_Migrate, /* Buffer is being migrated (norefs) */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -222,6 +223,8 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
+struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
+ sector_t block, unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
@@ -397,6 +400,12 @@ sb_find_get_block(struct super_block *sb, sector_t block)
return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
+static inline struct buffer_head *
+sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
+{
+ return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
+}
+
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
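A short sketch of the non-atomic lookup added above (illustrative; the caller is hypothetical): code running in sleepable context can use it instead of sb_find_get_block().

/* Sketch only: probe the buffer cache from a may-sleep context. */
static bool example_block_is_cached(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;

	bh = sb_find_get_block_nonatomic(sb, block);
	if (!bh)
		return false;

	brelse(bh);	/* drop the reference taken by the lookup */
	return true;
}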
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index d55b30057a45..50b14a5661c7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -490,9 +490,6 @@ extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
unsigned int which,
@@ -509,9 +506,6 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
unsigned int which, struct iov_iter *iter);
-extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 400fee6427a5..7a5b391dcc01 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -776,8 +776,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
@@ -840,12 +840,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_al(policy, target_freq,
@@ -855,6 +855,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -908,12 +916,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ah(policy, target_freq,
@@ -923,6 +931,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -993,12 +1009,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ac(policy, target_freq,
@@ -1008,7 +1024,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
efficiencies);
}
-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max,
+ int idx)
{
unsigned int freq;
@@ -1017,11 +1043,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
freq = policy->freq_table[idx].frequency;
- return freq == clamp_val(freq, policy->min, policy->max);
+ return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
+ unsigned int min,
+ unsigned int max,
unsigned int relation)
{
bool efficiencies = policy->efficiencies_available &&
@@ -1032,29 +1060,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
relation &= ~CPUFREQ_RELATION_E;
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
- return cpufreq_table_index_unsorted(policy, target_freq,
- relation);
+ return cpufreq_table_index_unsorted(policy, target_freq, min,
+ max, relation);
retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- idx = cpufreq_table_find_index_l(policy, target_freq,
- efficiencies);
+ idx = find_index_l(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_H:
- idx = cpufreq_table_find_index_h(policy, target_freq,
- efficiencies);
+ idx = find_index_h(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_C:
- idx = cpufreq_table_find_index_c(policy, target_freq,
- efficiencies);
+ idx = find_index_c(policy, target_freq, min, max, efficiencies);
break;
default:
WARN_ON_ONCE(1);
return 0;
}
- /* Limit frequency index to honor policy->min/max */
- if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ /* Limit frequency index to honor min and max */
+ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
efficiencies = false;
goto retry;
}
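A hedged sketch of how a caller can use the new min/max parameters (illustrative only; the function name is hypothetical): resolve a target frequency against one consistent snapshot of the policy limits instead of re-reading policy->min/policy->max, which may change concurrently.

/* Sketch only: take a single snapshot of the limits and pass it through. */
static int example_resolve_freq_index(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	unsigned int min = READ_ONCE(policy->min);
	unsigned int max = READ_ONCE(policy->max);

	return cpufreq_frequency_table_target(policy, target_freq, min, max,
					      CPUFREQ_RELATION_L);
}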
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index b79925b1c433..85ab710ec0e7 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -629,10 +629,14 @@ static inline int dma_mmap_wc(struct device *dev,
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
-#define dma_unmap_addr(PTR, ADDR_NAME) (0)
-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define dma_unmap_len(PTR, LEN_NAME) (0)
-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#define dma_unmap_addr(PTR, ADDR_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
+#define dma_unmap_len(PTR, LEN_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
#endif /* _LINUX_DMA_MAPPING_H */
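An illustrative driver-side sketch (struct and function names are hypothetical): with the change above, the pointer argument is still evaluated when CONFIG_NEED_DMA_MAP_STATE is disabled, so a descriptor that is only ever passed to these macros no longer looks unused to the compiler.

/* Sketch only: typical unmap-state bookkeeping in a driver. */
struct example_rx_desc {
	void *cpu_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_save_mapping(struct example_rx_desc *desc,
				 dma_addr_t dma, unsigned int len)
{
	/* desc is referenced by the macros even in the stub configuration */
	dma_unmap_addr_set(desc, mapping, dma);
	dma_unmap_len_set(desc, maplen, len);
}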
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
index 7db62fbc0500..31551e4cb8f3 100644
--- a/include/linux/file_ref.h
+++ b/include/linux/file_ref.h
@@ -61,7 +61,6 @@ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
atomic_long_set(&ref->refcnt, cnt - 1);
}
-bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
/**
@@ -178,20 +177,14 @@ static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
*/
static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
{
- long old, new;
+ long old;
old = atomic_long_read(&ref->refcnt);
- do {
- if (unlikely(old < 0))
- return __file_ref_put_badval(ref, old);
-
- if (old == FILE_REF_ONEREF)
- new = FILE_REF_DEAD;
- else
- new = old - 1;
- } while (!atomic_long_try_cmpxchg(&ref->refcnt, &old, new));
-
- return new == FILE_REF_DEAD;
+ if (likely(old == FILE_REF_ONEREF)) {
+ if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+ return true;
+ }
+ return file_ref_put(ref);
}
/**
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6cd8d1d28b8b..fc27b53c58c2 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -907,21 +907,6 @@ extern void fsnotify_wait_marks_destroyed(void);
/* Clear all of the marks of a group attached to a given object type */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
unsigned int obj_type);
-/* run all the marks in a group, and clear all of the vfsmount marks */
-static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
-}
-/* run all the marks in a group, and clear all of the inode marks */
-static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
-}
-/* run all the marks in a group, and clear all of the sn marks */
-static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
-}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 6fa0a268d538..097be89487bf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -2,6 +2,11 @@
/*
* fwnode.h - Firmware device node object handle type definition.
*
+ * This header file provides low-level data types and definitions for firmware
+ * and device property providers. The respective API header files supplied by
+ * them should contain all of the requisite data types and definitions for end
+ * users, so including it directly should not be necessary.
+ *
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ccce8a751e2a..3a8d35d41fda 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -440,10 +440,10 @@ static inline int __iommu_copy_struct_from_user(
void *dst_data, const struct iommu_user_data *src_data,
unsigned int data_type, size_t data_len, size_t min_len)
{
- if (src_data->type != data_type)
- return -EINVAL;
if (WARN_ON(!dst_data || !src_data))
return -EINVAL;
+ if (src_data->type != data_type)
+ return -EINVAL;
if (src_data->len < min_len || data_len < src_data->len)
return -EINVAL;
return copy_struct_from_user(dst_data, data_len, src_data->uptr,
@@ -456,8 +456,8 @@ static inline int __iommu_copy_struct_from_user(
* include/uapi/linux/iommufd.h
* @user_data: Pointer to a struct iommu_user_data for user space data info
* @data_type: The data type of the @kdst. Must match with @user_data->type
- * @min_last: The last memember of the data structure @kdst points in the
- * initial version.
+ * @min_last: The last member of the data structure @kdst points in the initial
+ * version.
* Return 0 for success, otherwise -error.
*/
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
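A hedged usage sketch of the macro documented above (the surrounding function is hypothetical; struct iommu_hwpt_vtd_s1 and IOMMU_HWPT_DATA_VTD_S1 come from include/uapi/linux/iommufd.h): copy a user payload, rejecting a mismatched type or a payload shorter than the initial ABI version.

/* Sketch only: validate and copy user data for a domain allocation. */
static int example_parse_user_data(const struct iommu_user_data *user_data)
{
	struct iommu_hwpt_vtd_s1 data;
	int ret;

	ret = iommu_copy_struct_from_user(&data, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ret;

	/* ... use data.pgtbl_addr, data.addr_width ... */
	return 0;
}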
diff --git a/include/linux/module.h b/include/linux/module.h
index d94b196d5a34..b3329110d668 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -162,6 +162,8 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
+struct module_kobject *lookup_or_create_module_kobject(const char *name);
+
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
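A brief, hedged sketch of the newly declared helper (the caller and attribute are hypothetical): look up the /sys/module/<name> kobject, creating it if needed, and attach a sysfs attribute to it.

/* Sketch only: attach a sysfs attribute under /sys/module/<name>/. */
static int example_add_module_attr(const char *name,
				   const struct attribute *attr)
{
	struct module_kobject *mk = lookup_or_create_module_kobject(name);

	if (!mk)
		return -ENOMEM;

	return sysfs_create_file(&mk->kobj, attr);
}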
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 36a4a1e1d8ca..d8bd5d37131a 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -226,6 +226,7 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
bool playback);
int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai);
+int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix);
/* MAXIM codec support */
int asoc_sdw_maxim_init(struct snd_soc_card *card,
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
index d099ae27f849..682499b871ea 100644
--- a/include/sound/ump_convert.h
+++ b/include/sound/ump_convert.h
@@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank {
/* context for converting from MIDI1 byte stream to UMP packet */
struct ump_cvt_to_ump {
/* MIDI1 intermediate buffer */
- unsigned char buf[4];
+ unsigned char buf[6]; /* up to 6 bytes for SysEx */
int len;
int cmd_bytes;
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index d9d0cb827117..f030adc462ee 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -53,43 +53,70 @@ struct landlock_ruleset_attr {
__u64 scoped;
};
-/*
- * sys_landlock_create_ruleset() flags:
+/**
+ * DOC: landlock_create_ruleset_flags
+ *
+ * **Flags**
*
- * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
- * version.
- * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues.
+ * %LANDLOCK_CREATE_RULESET_VERSION
+ * Get the highest supported Landlock ABI version (starting at 1).
+ *
+ * %LANDLOCK_CREATE_RULESET_ERRATA
+ * Get a bitmask of fixed issues for the current Landlock ABI version.
*/
/* clang-format off */
#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
/* clang-format on */
-/*
- * sys_landlock_restrict_self() flags:
- *
- * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF: Do not create any log related to the
- * enforced restrictions. This should only be set by tools launching unknown
- * or untrusted programs (e.g. a sandbox tool, container runtime, system
- * service manager). Because programs sandboxing themselves should fix any
- * denied access, they should not set this flag to be aware of potential
- * issues reported by system's logs (i.e. audit).
- * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON: Explicitly ask to continue
- * logging denied access requests even after an :manpage:`execve(2)` call.
- * This flag should only be set if all the programs than can legitimately be
- * executed will not try to request a denied access (which could spam audit
- * logs).
- * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF: Do not create any log related
- * to the enforced restrictions coming from future nested domains created by
- * the caller or its descendants. This should only be set according to a
- * runtime configuration (i.e. not hardcoded) by programs launching other
- * unknown or untrusted programs that may create their own Landlock domains
- * and spam logs. The main use case is for container runtimes to enable users
- * to mute buggy sandboxed programs for a specific container image. Other use
- * cases include sandboxer tools and init systems. Unlike
- * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF,
- * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF does not impact the requested
- * restriction (if any) but only the future nested domains.
+/**
+ * DOC: landlock_restrict_self_flags
+ *
+ * **Flags**
+ *
+ * By default, denied accesses originating from programs that sandbox themselves
+ * are logged via the audit subsystem. Such events typically indicate unexpected
+ * behavior, such as bugs or exploitation attempts. However, to avoid excessive
+ * logging, access requests denied by a domain not created by the originating
+ * program are not logged by default. The rationale is that programs should know
+ * their own behavior, but not necessarily the behavior of other programs. This
+ * default configuration is suitable for most programs that sandbox themselves.
+ * For specific use cases, the following flags allow programs to modify this
+ * default logging behavior.
+ *
+ * The %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF and
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON flags apply to the newly created
+ * Landlock domain.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * Disables logging of denied accesses originating from the thread creating
+ * the Landlock domain, as well as its children, as long as they continue
+ * running the same executable code (i.e., without an intervening
+ * :manpage:`execve(2)` call). This is intended for programs that execute
+ * unknown code without invoking :manpage:`execve(2)`, such as script
+ * interpreters. Programs that only sandbox themselves should not set this
+ * flag, so users can be notified of unauthorized access attempts via system
+ * logs.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * Enables logging of denied accesses after an :manpage:`execve(2)` call,
+ * providing visibility into unauthorized access attempts by newly executed
+ * programs within the created Landlock domain. This flag is recommended
+ * only when all potential executables in the domain are expected to comply
+ * with the access restrictions, as excessive audit log entries could make
+ * it more difficult to identify critical events.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+ * Disables logging of denied accesses originating from nested Landlock
+ * domains created by the caller or its descendants. This flag should be set
+ * according to runtime configuration, not hardcoded, to avoid suppressing
+ * important security events. It is useful for container runtimes or
+ * sandboxing tools that may launch programs which themselves create
+ * Landlock domains and could otherwise generate excessive logs. Unlike
+ * ``LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF``, this flag only affects
+ * future nested domains, not the one being created. It can also be used
+ * with a @ruleset_fd value of -1 to mute subdomain logs without creating a
+ * domain.
*/
/* clang-format off */
#define LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF (1U << 0)
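An illustrative userspace snippet tying the documented flags together (there are no libc wrappers, so raw syscall(2) is used): query the highest supported ABI version, and mute logs from future nested domains by passing a ruleset_fd of -1, as described above.

/* Sketch only: ABI probing and subdomain-log muting from userspace. */
#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_landlock_abi_version(void)
{
	return syscall(__NR_landlock_create_ruleset, NULL, 0,
		       LANDLOCK_CREATE_RULESET_VERSION);
}

static long example_mute_subdomain_logs(void)
{
	/* ruleset_fd == -1: affect logging only, do not create a domain */
	return syscall(__NR_landlock_restrict_self, -1,
		       LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
}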
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index 41ff44dfa1db..f52de5ed1b3b 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,4 +107,10 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/*
+ * Some ufs devices may need more time to be in hibern8 before exiting.
+ * Enable this quirk to give it an additional 100us.
+ */
+#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */