Greg Kroah-Hartman fe51d37c6c Merge branch 'android12-5.10' into android12-5.10-lts
Sync up with android12-5.10 for the following commits:

976d98e9aa ANDROID: ABI: Add page_pinner_inited into symbols list
16c2b1d94f ANDROID: page_pinner: prevent pp_buffer access before initialization
cd1d9c42a2 UPSTREAM: hwrng: virtio - add an internal buffer
05fa7d8eee ANDROID: fix ABI by undoing atomic64_t -> u64 type conversion
cda90416c0 UPSTREAM: net: retrieve netns cookie via getsocketopt
78a559e2a9 UPSTREAM: net: initialize net->net_cookie at netns setup
fb0cece721 Merge tag 'android12-5.10.168_r00' into android12-5.10
989d4c69a9 UPSTREAM: ext4: fix another off-by-one fsmap error on 1k block filesystems
b0d829f27f UPSTREAM: ext4: block range must be validated before use in ext4_mb_clear_bb()
0301fe419a UPSTREAM: ext4: add strict range checks while freeing blocks
1d4b2a4ad7 UPSTREAM: ext4: add ext4_sb_block_valid() refactored out of ext4_inode_block_valid()
8ddbd3df93 UPSTREAM: ext4: refactor ext4_free_blocks() to pull out ext4_mb_clear_bb()
370cb1c270 UPSTREAM: usb: dwc3: core: do not use 3.0 clock when operating in 2.0 mode
eb53a59b4d ANDROID: GKI: rockchip: Add symbols for clk api
a13e8447e8 BACKPORT: arm64: mte: move register initialization to C
eddac45546 UPSTREAM: rcu: Remove __read_mostly annotations from rcu_scheduler_active externs
afff17f583 ANDROID: GKI: Update symbol list for mtk
62f5fae173 UPSTREAM: ext4: refuse to create ea block when umounted
33245a0eac UPSTREAM: ext4: optimize ea_inode block expansion
09e5cc649d UPSTREAM: ext4: allocate extended attribute value in vmalloc area
8926771f7e UPSTREAM: usb: gadget: composite: Draw 100mA current if not configured
87a065fb94 UPSTREAM: usb: dwc3: gadget: Change condition for processing suspend event
26638f8e54 ANDROID: GKI: update xiaomi symbol list
193b312b2f UPSTREAM: net/sched: tcindex: update imperfect hash filters respecting rcu
9a1be9a472 FROMGIT: KVM: arm64: Ignore kvm-arm.mode if !is_hyp_mode_available()
dbcd8cb535 UPSTREAM: KVM: arm64: Allow KVM to be disabled from the command line
631630d75f ANDROID: ABI: Cuttlefish Symbol update
278dfb09d7 Revert "ANDROID: dma-ops: Add restricted vendor hook"
c2e3f757d3 UPSTREAM: io_uring: ensure that io_init_req() passes in the right issue_flags
9abdacf47f FROMGIT: usb: gadget: configfs: Restrict symlink creation is UDC already binded
d415c6e56f UPSTREAM: io_uring: add missing lock in io_get_file_fixed
52cc662810 ANDROID: ABI: Update oplus symbol list
d01f7e1269 ANDROID: vendor_hooks: Add hooks for mutex and rwsem optimistic spin
d4d05c6e6e ANDROID: dma-buf: heaps: Don't lock unused dmabuf_page_pool mutex
1d05213028 ANDROID: mm/filemap: Fix missing put_page() for speculative page fault
fda8a58faa UPSTREAM: KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
5692e2bb4e UPSTREAM: net: qrtr: combine nameservice into main module
4b9d11ae5f ANDROID: GKI: Update symbol list for mtk
b086cc7361 FROMLIST: rcu-tasks: Fix build error
7fd4fbe615 ANDROID: incremental fs: Move throttling to outside page lock
5d9b0e83e3 ANDROID: incremental fs: Fix race between truncate and write last block
6a8037d4eb UPSTREAM: usb: gadget: u_serial: Add null pointer check in gserial_resume
f0be4b9779 Revert "ANDROID: GKI: loadavg: Export for get_avenrun"
781e1c83ef ANDROID: ABI: Update allowed list for QCOM
579f8bf863 ANDROID: Update symbol list for mtk
80b27def69 UPSTREAM: ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
a4d6d4d1e7 UPSTREAM: net_sched: reject TCF_EM_SIMPLE case for complex ematch module
fb952695c8 UPSTREAM: io_uring/rw: remove leftover debug statement
ca331f289a UPSTREAM: io_uring/rw: ensure kiocb_end_write() is always called
d54d41716d UPSTREAM: io_uring: fix double poll leak on repolling
fc978be7b2 UPSTREAM: io_uring: Clean up a false-positive warning from GCC 9.3.0
827f8fcb29 UPSTREAM: io_uring/net: fix fast_iov assignment in io_setup_async_msg()
403642c036 UPSTREAM: io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
0c50a117bf UPSTREAM: io_uring/rw: defer fsnotify calls to task context
b29c357309 UPSTREAM: io_uring: do not recalculate ppos unnecessarily
84e34d2ef5 UPSTREAM: io_uring: update kiocb->ki_pos at execution time
b543e0d210 UPSTREAM: io_uring: remove duplicated calls to io_kiocb_ppos
9166f5418a UPSTREAM: io_uring: ensure that cached task references are always put on exit
fee5372abf UPSTREAM: io_uring: fix CQ waiting timeout handling
a4d056e350 UPSTREAM: io_uring: lock overflowing for IOPOLL
0dfe72e890 UPSTREAM: io_uring: check for valid register opcode earlier
1b735b5eb2 UPSTREAM: io_uring: fix async accept on O_NONBLOCK sockets
63bf975936 UPSTREAM: io_uring: allow re-poll if we made progress
a64d6ea01b UPSTREAM: io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
cf7ef78842 UPSTREAM: io_uring: add flag for disabling provided buffer recycling
45b2a34e21 UPSTREAM: io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly
4b912a635e UPSTREAM: io_uring: improve send/recv error handling
ef0c71d0f1 UPSTREAM: io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
1531e1fb8d BACKPORT: iommu: Avoid races around device probe
60944bdddc UPSTREAM: io_uring/io-wq: only free worker if it was allocated for creation
ac06912075 UPSTREAM: io_uring/io-wq: free worker if task_work creation is canceled
98a15feed0 UPSTREAM: io_uring: Fix unsigned 'res' comparison with zero in io_fixup_rw_res()
a234cc4e55 UPSTREAM: um: Increase stack frame size threshold for signal.c
d40d310e5e ANDROID: GKI: Enable ARM64_ERRATUM_2454944
9d2ec2e0b6 ANDROID: dma-ops: Add restricted vendor hook
3c75a6fb7f ANDROID: arm64: Work around Cortex-A510 erratum 2454944
865f370bf9 ANDROID: mm/vmalloc: Add override for lazy vunmap
1eb5992d60 ANDROID: cpuidle-psci: Fix suspicious RCU usage
d6b2899ce6 ANDROID: ABI: update allowed list for galaxy
3fcc69ca4d FROMGIT: f2fs: add sysfs nodes to set last_age_weight
899476c3af FROMGIT: f2fs: fix wrong calculation of block age
d0f788b8fa ANDROID: struct io_uring ABI preservation hack for 5.10.162 changes
fef924db72 ANDROID: fix up struct task_struct ABI change in 5.10.162
d369ac0b2a ANDROID: add flags variable back to struct proto_ops
5756328b3f UPSTREAM: io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
72d1c48675 UPSTREAM: eventfd: provide a eventfd_signal_mask() helper
d7a47b29d5 UPSTREAM: eventpoll: add EPOLL_URING_WAKE poll wakeup flag
7c9f38c09b UPSTREAM: Revert "proc: don't allow async path resolution of /proc/self components"
498b35b3c4 UPSTREAM: Revert "proc: don't allow async path resolution of /proc/thread-self components"
4b17dea786 UPSTREAM: net: remove cmsg restriction from io_uring based send/recvmsg calls
d10f30da0d UPSTREAM: task_work: unconditionally run task_work from get_signal()
62822bf630 UPSTREAM: signal: kill JOBCTL_TASK_WORK
5e6347b586 UPSTREAM: io_uring: import 5.15-stable io_uring
518e02ed06 UPSTREAM: task_work: add helper for more targeted task_work canceling
86acb6a529 UPSTREAM: kernel: don't call do_exit() for PF_IO_WORKER threads
52f564e57b UPSTREAM: kernel: stop masking signals in create_io_thread()
bcb749b0b1 UPSTREAM: x86/process: setup io_threads more like normal user space threads
1f4eb35546 UPSTREAM: arch: ensure parisc/powerpc handle PF_IO_WORKER in copy_thread()
150dea15cb UPSTREAM: arch: setup PF_IO_WORKER threads like PF_KTHREAD
cf487d3c6a UPSTREAM: entry/kvm: Exit to user mode when TIF_NOTIFY_SIGNAL is set
6e4362caf9 UPSTREAM: kernel: allow fork with TIF_NOTIFY_SIGNAL pending
b25b8c55ba UPSTREAM: coredump: Limit what can interrupt coredumps
723de95c0c UPSTREAM: kernel: remove checking for TIF_NOTIFY_SIGNAL
8492c5dd3b UPSTREAM: task_work: remove legacy TWA_SIGNAL path
1987566815 UPSTREAM: alpha: fix TIF_NOTIFY_SIGNAL handling
ad4ba3038a UPSTREAM: ARC: unbork 5.11 bootup: fix snafu in _TIF_NOTIFY_SIGNAL handling
bb855b51a9 UPSTREAM: ia64: don't call handle_signal() unless there's actually a signal queued
7140fddd84 UPSTREAM: sparc: add support for TIF_NOTIFY_SIGNAL
c9c70c8cb6 UPSTREAM: riscv: add support for TIF_NOTIFY_SIGNAL
52a756bf17 UPSTREAM: nds32: add support for TIF_NOTIFY_SIGNAL
6eaa6653e4 UPSTREAM: ia64: add support for TIF_NOTIFY_SIGNAL
1dcd12493b UPSTREAM: h8300: add support for TIF_NOTIFY_SIGNAL
b265cdb085 UPSTREAM: c6x: add support for TIF_NOTIFY_SIGNAL
f4ece56973 UPSTREAM: alpha: add support for TIF_NOTIFY_SIGNAL
01af0730c9 UPSTREAM: xtensa: add support for TIF_NOTIFY_SIGNAL
29420dc96b UPSTREAM: arm: add support for TIF_NOTIFY_SIGNAL
6c3e852b4f UPSTREAM: microblaze: add support for TIF_NOTIFY_SIGNAL
8c81f539a0 UPSTREAM: hexagon: add support for TIF_NOTIFY_SIGNAL
175cc59b9c UPSTREAM: csky: add support for TIF_NOTIFY_SIGNAL
2b94543d45 UPSTREAM: openrisc: add support for TIF_NOTIFY_SIGNAL
e2e4fbbceb UPSTREAM: sh: add support for TIF_NOTIFY_SIGNAL
8548375354 UPSTREAM: um: add support for TIF_NOTIFY_SIGNAL
eae40ee91c UPSTREAM: s390: add support for TIF_NOTIFY_SIGNAL
8489c86344 UPSTREAM: mips: add support for TIF_NOTIFY_SIGNAL
b1f0e1159f UPSTREAM: powerpc: add support for TIF_NOTIFY_SIGNAL
98031aa870 UPSTREAM: parisc: add support for TIF_NOTIFY_SIGNAL
470c17bd71 UPSTREAM: nios32: add support for TIF_NOTIFY_SIGNAL
c5825095c4 UPSTREAM: m68k: add support for TIF_NOTIFY_SIGNAL
fcf75a019e UPSTREAM: arm64: add support for TIF_NOTIFY_SIGNAL
d6b63ac444 UPSTREAM: arc: add support for TIF_NOTIFY_SIGNAL
109ccff96d UPSTREAM: x86: Wire up TIF_NOTIFY_SIGNAL
862aa233e7 UPSTREAM: task_work: Use TIF_NOTIFY_SIGNAL if available
a14b028722 UPSTREAM: entry: Add support for TIF_NOTIFY_SIGNAL
00af4b88ad UPSTREAM: fs: provide locked helper variant of close_fd_get_file()
82c3becbef UPSTREAM: file: Rename __close_fd_get_file close_fd_get_file
98006a0a15 UPSTREAM: fs: make do_renameat2() take struct filename
661bc0f679 UPSTREAM: signal: Add task_sigpending() helper
13f03f5275 UPSTREAM: net: add accept helper not installing fd
af091af9db UPSTREAM: net: provide __sys_shutdown_sock() that takes a socket
9505ff1a81 UPSTREAM: tools headers UAPI: Sync openat2.h with the kernel sources
2507b99d9a UPSTREAM: fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED
6b92128557 UPSTREAM: Make sure nd->path.mnt and nd->path.dentry are always valid pointers
eaf736aa71 UPSTREAM: fix handling of nd->depth on LOOKUP_CACHED failures in try_to_unlazy*
7928a1689b UPSTREAM: fs: add support for LOOKUP_CACHED
72d2f4c1cd UPSTREAM: saner calling conventions for unlazy_child()
ee44bd07c4 UPSTREAM: iov_iter: add helper to save iov_iter state
463a74a83b UPSTREAM: kernel: provide create_io_thread() helper
8e993eabeb UPSTREAM: net: loopback: use NET_NAME_PREDICTABLE for name_assign_type
4373e5def3 UPSTREAM: Bluetooth: L2CAP: Fix u8 overflow
5278199031 UPSTREAM: HID: uclogic: Add HID_QUIRK_HIDINPUT_FORCE quirk
fa335f5bb9 UPSTREAM: HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch V 10
784df646aa UPSTREAM: HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch 10E
29cde746b8 UPSTREAM: HID: ite: Add support for Acer S1002 keyboard-dock
228253f43f UPSTREAM: igb: Initialize mailbox message for VF reset
001a013e84 UPSTREAM: xhci: Apply XHCI_RESET_TO_DEFAULT quirk to ADL-N
4fa772e757 UPSTREAM: USB: serial: f81534: fix division by zero on line-speed change
d81b6e6e88 UPSTREAM: USB: serial: f81232: fix division by zero on line-speed change
190b01ac50 UPSTREAM: USB: serial: cp210x: add Kamstrup RF sniffer PIDs
34d4848ba3 UPSTREAM: USB: serial: option: add Quectel EM05-G modem
9e620f2b54 UPSTREAM: usb: gadget: uvc: Prevent buffer overflow in setup handler
a20fd832a4 BACKPORT: f2fs: do not allow to decompress files have FI_COMPRESS_RELEASED
16996773d6 BACKPORT: f2fs: handle decompress only post processing in softirq
ce72626280 BACKPORT: f2fs: introduce memory mode
246a996565 BACKPORT: f2fs: allow compression for mmap files in compress_mode=user
f069ba2b3d UPSTREAM: iommu/iova: Fix alloc iova overflows issue
a1806694fc UPSTREAM: media: dvb-core: Fix UAF due to refcount races at releasing
5f30de1dff ANDROID: GKI: Add Tuxera symbol list
e3a5b60c60 UPSTREAM: usb: dwc3: gadget: Skip waiting for CMDACT cleared during endxfer
6b23440751 UPSTREAM: usb: dwc3: Increase DWC3 controller halt timeout
4091dff1ff UPSTREAM: usb: dwc3: Remove DWC3 locking during gadget suspend/resume
4fc3932857 UPSTREAM: usb: dwc3: Avoid unmapping USB requests if endxfer is not complete
19803140c0 UPSTREAM: usb: dwc3: gadget: Continue handling EP0 xfercomplete events
0bbc89c346 UPSTREAM: usb: dwc3: gadget: Synchronize IRQ between soft connect/disconnect
35cb147c38 UPSTREAM: usb: dwc3: gadget: Force sending delayed status during soft disconnect
5dc06419d8 UPSTREAM: usb: dwc3: Do not service EP0 and conndone events if soft disconnected
dd8418a59a UPSTREAM: efi: rt-wrapper: Add missing include
67884a649c UPSTREAM: arm64: efi: Execute runtime services from a dedicated stack
6bd9415d98 ANDROID: cpu: correct dl_cpu_busy() calls
9e2b4cc230 UPSTREAM: ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
80cad52515 UPSTREAM: firmware: tegra: Reduce stack usage
79c4f55c94 UPSTREAM: scsi: bfa: Move a large struct from the stack onto the heap
e096145ac3 ANDROID: mm: page_pinner: ensure do_div() arguments matches with respect to type
e427004fad ANDROID: Revert "ANDROID: allmodconfig: disable WERROR"
8cf3c25495 FROMGIT: scsi: ufs: Modify Tactive time setting conditions
fc1490c621 UPSTREAM: remoteproc: core: Fix rproc->firmware free in rproc_set_firmware()
869cae6f25 UPSTREAM: usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait
56c8a40436 UPSTREAM: usb: gadget: f_hid: fix f_hidg lifetime vs cdev
e973de77ad UPSTREAM: usb: gadget: f_hid: optional SETUP/SET_REPORT mode
283eb356fd ANDROID: GKI: add symbol list file for honor
d30de90932 ANDROID: add TEST_MAPPING for net/, include/net
75d0665639 BACKPORT: arm64/bpf: Remove 128MB limit for BPF JIT programs

Change-Id: I111e3dafc40d4f06832e374fd10ae5984921dff5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2023-03-24 10:32:49 +00:00

/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion
*
* Copyright IBM Corporation, 2001
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
*
* Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
*
* For detailed explanation of Read-Copy Update mechanism see -
* http://lse.sourceforge.net/locking/rcupdate.html
*
*/
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))
#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
/*
* Defined as a macro as it is a very low level header included from
* areas that don't even know about current. This gives the rcu_read_lock()
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif
static inline void __rcu_read_lock(void)
{
preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
preempt_enable();
rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
{
return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
#else
static inline void rcu_init_tasks_generic(void) { }
#endif
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/**
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
*
* RCU read-side critical sections are forbidden in the inner idle loop,
* that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
* will happily ignore any such read-side critical sections. However,
* things like powertop need tracepoints in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
* will tell RCU that it needs to pay attention, invoke its argument
* (in this example, calling the do_something_with_RCU() function),
* and then tell RCU to go back to ignoring this CPU. It is permissible
* to nest RCU_NONIDLE() wrappers, but not indefinitely (the limit is
* on the order of a million or so, even on 32-bit systems). It is
* not legal to block within RCU_NONIDLE(), nor is it permissible to
* transfer control either into or out of RCU_NONIDLE()'s statement.
*/
#define RCU_NONIDLE(a) \
do { \
rcu_irq_enter_irqson(); \
do { a; } while (0); \
rcu_irq_exit_irqson(); \
} while (0)
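/*
 * Illustrative use (editorial sketch, not part of the original header):
 * emitting a hypothetical tracepoint from the inner idle loop. The
 * trace_idle_event() call is an assumed example, not a real tracepoint.
 *
 * RCU_NONIDLE(trace_idle_event(cpu));
 */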
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
* This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU_GENERIC
# ifdef CONFIG_TASKS_RCU
# define rcu_tasks_classic_qs(t, preempt) \
do { \
if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif
# ifdef CONFIG_TASKS_TRACE_RCU
# define rcu_tasks_trace_qs(t) \
do { \
if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
!unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
smp_store_release(&(t)->trc_reader_checked, true); \
smp_mb(); /* Readers partitioned by store. */ \
} \
} while (0)
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
#define rcu_tasks_qs(t, preempt) \
do { \
rcu_tasks_classic_qs((t), (preempt)); \
rcu_tasks_trace_qs((t)); \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
* machinery were to be shut off, as some advocate for PREEMPTION kernels.
*/
#define cond_resched_tasks_rcu_qs() \
do { \
rcu_tasks_qs(current, false); \
cond_resched(); \
} while (0)
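/*
 * Illustrative use (editorial sketch): a long-running kthread loop that
 * reports quiescent states between items. work_available() and
 * process_one_item() are hypothetical helpers.
 *
 * while (!kthread_should_stop()) {
 *         if (work_available())
 *                 process_one_item();
 *         cond_resched_tasks_rcu_qs();
 * }
 */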
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
/*
* The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
* are needed for dynamic initialization and destruction of rcu_head
* on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
* dynamic initialization and destruction of statically allocated rcu_head
* structures. However, rcu_head structures allocated dynamically in the
* heap don't need any initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
}
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
# define rcu_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
{
return 1;
}
static inline int rcu_read_lock_bh_held(void)
{
return 1;
}
static inline int rcu_read_lock_sched_held(void)
{
return !preemptible();
}
static inline int rcu_read_lock_any_held(void)
{
return !preemptible();
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
/**
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
*
* This checks debug_lockdep_rcu_enabled() before checking (c) to
* prevent early boot splats due to lockdep not yet being initialized,
* and rechecks it after checking (c) to prevent false-positive splats
* due to races with lockdep being disabled. See commit 3066820034b5dd
* ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(".data.unlikely") __warned; \
if (debug_lockdep_rcu_enabled() && (c) && \
debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/*
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
* multiple pointers markings to match different RCU implementations
* (e.g., __srcu), should this make sense in the future.
*/
#ifdef __CHECKER__
#define rcu_check_sparse(p, space) \
((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
#define rcu_dereference_raw(p) \
({ \
/* Dependency order vs. p above. */ \
typeof(p) ________p1 = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(________p1)); \
})
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
*/
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
*
* Assigns the specified value to the specified RCU-protected
* pointer, ensuring that any concurrent RCU readers will see
* any prior initialization.
*
* Inserts memory barriers on architectures that require them
* (which is most of them), and also prevents the compiler from
* reordering the code that initializes the structure after the pointer
* assignment. More importantly, this call documents which pointers
* will be dereferenced by RCU read-side code.
*
* In some special cases, you may use RCU_INIT_POINTER() instead
* of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
* to the fact that it does not constrain either the CPU or the compiler.
* That said, using RCU_INIT_POINTER() when you should have used
* rcu_assign_pointer() is a very bad thing that results in
* impossible-to-diagnose memory corruption. So please be careful.
* See the RCU_INIT_POINTER() comment header for details.
*
* Note that rcu_assign_pointer() evaluates each of its arguments only
* once, appearances notwithstanding. One of the "extra" evaluations
* is in typeof() and the other is visible only to sparse (__CHECKER__),
* neither of which actually executes the argument. As with most cpp
* macros, this execute-arguments-only-once property is important, so
* please be careful when making changes to rcu_assign_pointer() and the
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
do { \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
} while (0)
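/*
 * Illustrative use (editorial sketch): publishing a fully initialized
 * structure so that readers never see a partially built one. The
 * struct foo, gp, and field names are hypothetical.
 *
 * struct foo { int a; };
 * struct foo __rcu *gp;
 *
 * p = kmalloc(sizeof(*p), GFP_KERNEL);
 * p->a = 1;
 * rcu_assign_pointer(gp, p);
 */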
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
* @rcu_ptr: RCU pointer, whose old value is returned
* @ptr: regular pointer
* @c: the lockdep conditions under which the dereference will take place
*
* Perform a replacement, where @rcu_ptr is an RCU-annotated
* pointer and @c is the lockdep argument that is passed to the
* rcu_dereference_protected() call used to read that pointer. The old
* value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
*/
#define rcu_replace_pointer(rcu_ptr, ptr, c) \
({ \
typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
rcu_assign_pointer((rcu_ptr), (ptr)); \
__tmp; \
})
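/*
 * Illustrative use (editorial sketch): swapping in a new version of an
 * update-side-locked structure, then freeing the old one after a grace
 * period. gp, my_lock, and the rh field are hypothetical.
 *
 * spin_lock(&my_lock);
 * old = rcu_replace_pointer(gp, new, lockdep_is_held(&my_lock));
 * spin_unlock(&my_lock);
 * if (old)
 *         kfree_rcu(old, rh);
 */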
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* lockdep checks for being in an RCU read-side critical section. This is
* useful when the value of this pointer is accessed, but the pointer is
* not dereferenced, for example, when testing an RCU-protected pointer
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
* is the case in the context of the RCU callback that is freeing up
* the data, or after a synchronize_rcu() returns. This can be useful
* when tearing down multi-linked structures after a grace period
* has elapsed.
*/
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
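/*
 * Illustrative use (editorial sketch): testing an RCU-protected pointer
 * against NULL without entering a read-side critical section. gp is
 * hypothetical.
 *
 * if (!rcu_access_pointer(gp))
 *         return -ENOENT;
 */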
/**
* rcu_dereference_check() - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Do an rcu_dereference(), but check that the conditions under which the
* dereference will take place are correct. Typically the conditions
* indicate the various locking conditions that should be held at that
* point. The check should return true if the conditions are satisfied.
* An implicit check for being in an RCU read-side critical section
* (rcu_read_lock()) is included.
*
* For example:
*
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
*
* could be used to indicate to lockdep that foo->bar may only be dereferenced
* if either rcu_read_lock() is held, or that the lock required to replace
* the bar struct at foo->bar is held.
*
* Note that the list of conditions may also include indications of when a lock
* need not be held, for example during initialisation or destruction of the
* target struct:
*
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
* atomic_read(&foo->usage) == 0);
*
* Inserts memory barriers on architectures that require them
* (currently only the Alpha), prevents the compiler from refetching
* (and from merging fetches), and, more importantly, documents exactly
* which pointers are protected by RCU and checks that the pointer is
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
/*
* The tracing infrastructure traces RCU (we want that), but unfortunately
* some of the RCU checks cause tracing to lock up the system.
*
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(). This is useful in cases where update-side locks
* prevent the value of the pointer from changing. Please note that this
* primitive does *not* prevent the compiler from repeating this reference
* or combining it with other references, so it should not be used without
* protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
__rcu_dereference_protected((p), (c), __rcu)
/**
* rcu_dereference() - fetch RCU-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* This is a simple wrapper around rcu_dereference_check().
*/
#define rcu_dereference(p) rcu_dereference_check(p, 0)
/**
* rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* Makes rcu_dereference_check() do the dirty work.
*/
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
/**
* rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* Makes rcu_dereference_check() do the dirty work.
*/
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
* @p: The pointer to hand off
*
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
* kill_dependency(). It could be used as follows::
*
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
* if (long_lived) {
* if (!atomic_inc_not_zero(p->refcnt))
* long_lived = false;
* else
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
*/
#define rcu_pointer_handoff(p) (p)
/**
* rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
* are within RCU read-side critical sections, then the
* synchronize_rcu() is guaranteed to block until after all the other
* CPUs exit their critical sections. Similarly, if call_rcu() is invoked
* on one CPU while other CPUs are within RCU read-side critical
* sections, invocation of the corresponding RCU callback is deferred
* until after all the other CPUs exit their critical sections.
*
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
* read-side critical section, (2) CPU 1 invokes call_rcu() to register
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
* (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
* callback is invoked. This is legal, because the RCU read-side critical
* section that was running concurrently with the call_rcu() (and which
* therefore might be referencing something that the corresponding RCU
* callback would free up) has completed before the corresponding
* RCU callback is invoked.
*
* RCU read-side critical sections may be nested. Any deferred actions
* will be deferred until the outermost RCU read-side critical section
* completes.
*
* You can avoid reading and understanding the next paragraph by
* following this rule: don't put anything in an rcu_read_lock() RCU
* read-side critical section that would block in a !PREEMPTION kernel.
* But if you want the full story, read on!
*
* In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
* kernel builds, RCU read-side critical sections may be preempted,
* but explicit blocking is illegal. Finally, in preemptible RCU
* implementations in real-time (with -rt patchset) kernel builds, RCU
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
}
/*
* So where is rcu_write_lock()? It does not exist, as there is no
* way for writers to lock out RCU readers. This is a feature, not
* a bug -- this property is what provides RCU's performance benefits.
* Of course, writers must coordinate with each other. The normal
* spinlock primitives work well for this, but any other technique may be
* used as well. RCU does not care how the writers keep out of each
* others' way, as long as they do so.
*/
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
* In most situations, rcu_read_unlock() is immune from deadlock.
* However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
* is responsible for deboosting, which it does via rt_mutex_unlock().
* Unfortunately, this function acquires the scheduler's runqueue and
* priority-inheritance spinlocks. This means that deadlock could result
* if the caller of rcu_read_unlock() already holds one of these locks or
* any lock that is ever acquired while holding them.
*
* That said, RCU readers are never priority boosted unless they were
* preempted. Therefore, one way to avoid deadlock is to make sure
* that preemption never happens within any RCU read-side critical
* section whose outermost rcu_read_unlock() is called with one of
* rt_mutex_unlock()'s locks held. Such preemption can be avoided in
* a number of ways, for example, by invoking preempt_disable() before
* the critical section's outermost rcu_read_lock().
*
* Given that the set of locks acquired by rt_mutex_unlock() might change
* at any time, a somewhat more future-proofed approach is to make sure
* that preemption never happens within any RCU read-side critical
* section whose outermost rcu_read_unlock() is called with irqs disabled.
* This approach relies on the fact that rt_mutex_unlock() currently only
* acquires irq-disabled locks.
*
* The second of these two approaches is best in most situations;
* however, the first approach can also be useful, at least to those
* developers willing to keep abreast of the set of locks acquired by
* rt_mutex_unlock().
*
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
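/*
 * Illustrative use (editorial sketch): the canonical reader pattern,
 * combining rcu_read_lock(), rcu_dereference(), and rcu_read_unlock().
 * gp and its ->a field are hypothetical.
 *
 * rcu_read_lock();
 * p = rcu_dereference(gp);
 * if (p)
 *         val = p->a;
 * rcu_read_unlock();
 */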
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
* This is the equivalent of rcu_read_lock(), but also disables softirqs.
* Note that anything else that disables softirqs can also serve as
* an RCU read-side critical section.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
* rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
}
/**
* rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
}
/**
* rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
*
* This is the equivalent of rcu_read_lock(), but disables preemption.
* Read-side critical sections can also be introduced by anything else
* that disables preemption, including local_irq_disable() and friends.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
* rcu_read_unlock_sched() from process context if the matching
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
{
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
preempt_disable_notrace();
__acquire(RCU_SCHED);
}
/**
* rcu_read_unlock_sched() - marks the end of an RCU-classic critical section
*
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
__release(RCU_SCHED);
preempt_enable_notrace();
}
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
* @p: The pointer to be initialized.
* @v: The value to initialize the pointer to.
*
* Initialize an RCU-protected pointer in special cases where readers
* do not need ordering constraints on the CPU or the compiler. These
* special cases are:
*
* 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
* 2. The caller has taken whatever steps are required to prevent
* RCU readers from concurrently accessing this pointer *or*
* 3. The referenced data structure has already been exposed to
* readers either at compile time or via rcu_assign_pointer() *and*
*
* a. You have not made *any* reader-visible changes to
* this structure since then *or*
* b. It is OK for readers accessing this structure from its
* new location to see the old state of the structure. (For
* example, the changes were to statistical counters or to
* other state where exact synchronization is not required.)
*
* Failure to follow these rules governing use of RCU_INIT_POINTER() will
* result in impossible-to-diagnose memory corruption: the structures
* will look OK in crash dumps, but any concurrent RCU readers might
* see pre-initialized values of the referenced data structure. So
* please be very careful how you use RCU_INIT_POINTER()!!!
*
* If you are creating an RCU-protected linked structure that is accessed
* by a single external-to-structure RCU-protected pointer, then you may
* use RCU_INIT_POINTER() to initialize the internal RCU-protected
* pointers, but you must use rcu_assign_pointer() to initialize the
* external-to-structure pointer *after* you have completely initialized
* the reader-accessible portions of the linked structure.
*
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
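/*
 * Illustrative use (editorial sketch): NULLing out a pointer, the most
 * common legitimate use (case 1 above). gp is hypothetical.
 *
 * RCU_INIT_POINTER(gp, NULL);
 */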
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
* @p: The pointer to be initialized.
* @v: The value to initialize the pointer to.
*
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
#define RCU_POINTER_INITIALIZER(p, v) \
.p = RCU_INITIALIZER(v)
/*
* Does the specified offset indicate that the corresponding rcu_head
* structure can be handled by kvfree_rcu()?
*/
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
/*
* Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
*/
#define __kvfree_rcu(head, offset) \
do { \
BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
} while (0)
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree
* @rhf: the name of the struct rcu_head within the type of @ptr.
*
* Many rcu callback functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
* The kfree_rcu() function handles this issue. Rather than encoding a
* function address in the embedded rcu_head structure, kfree_rcu() instead
* encodes the offset of the rcu_head structure within the base structure.
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in __kvfree_rcu(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
* Note that the allowable offset might decrease in the future, for example,
* to allow something like kmem_cache_free_rcu().
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
#define kfree_rcu(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
if (___p) \
__kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
} while (0)
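/*
 * Illustrative use (editorial sketch): freeing a structure that embeds
 * an rcu_head. struct foo and its fields are hypothetical.
 *
 * struct foo {
 *         int a;
 *         struct rcu_head rh;
 * };
 *
 * kfree_rcu(p, rh);
 */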
/**
* kvfree_rcu() - kvfree an object after a grace period.
*
* This macro takes one or two arguments, depending on whether
* the object is head-less or not. If it has a head, the
* semantics are the same as for kfree_rcu():
*
* kvfree_rcu(ptr, rhf);
*
* where @ptr is the pointer to be passed to kvfree() and @rhf is
* the name of the rcu_head structure within the type of @ptr.
*
* When it comes to head-less variant, only one argument
* is passed and that is just a pointer which has to be
* freed after a grace period. Therefore the semantic is
*
* kvfree_rcu(ptr);
*
* where @ptr is the pointer to be passed to kvfree().
*
* Please note, the head-less way of freeing may only be used
* from a context that is allowed to sleep (that is, one that
* satisfies the might_sleep() annotation). Otherwise, please
* switch to embedding an rcu_head structure within the type
* of @ptr.
*/
#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
#define kvfree_rcu_arg_1(ptr) \
do { \
typeof(ptr) ___p = (ptr); \
\
if (___p) \
kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
} while (0)
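/*
 * Illustrative use (editorial sketch): the head-less variant, which
 * must be called from a sleepable context. buf is a hypothetical
 * pointer obtained from kvmalloc().
 *
 * might_sleep();
 * kvfree_rcu(buf);
 */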
/*
* Place this after a lock-acquisition primitive to guarantee that
* an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock() do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
/* Has the specified rcu_head structure been handed to call_rcu()? */
/**
* rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
* @rhp: The rcu_head structure to initialize.
*
* If you intend to invoke rcu_head_after_call_rcu() to test whether a
* given rcu_head structure has already been passed to call_rcu(), then
* you must also invoke this rcu_head_init() function on it just after
* allocating that structure. Calls to this function must not race with
* calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
*/
static inline void rcu_head_init(struct rcu_head *rhp)
{
rhp->func = (rcu_callback_t)~0L;
}
/**
* rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
* @rhp: The rcu_head structure to test.
* @f: The function passed to call_rcu() along with @rhp.
*
* Returns @true if the @rhp has been passed to call_rcu() with @f,
* and @false otherwise. Emits a warning in any other case, including
* the case where @rhp has already been invoked after a grace period.
* Calls to this function must not race with callback invocation. One way
* to avoid such races is to enclose the call to rcu_head_after_call_rcu()
* in an RCU read-side critical section that includes a read-side fetch
* of the pointer to the structure containing @rhp.
*/
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
rcu_callback_t func = READ_ONCE(rhp->func);
if (func == f)
return true;
WARN_ON_ONCE(func != (rcu_callback_t)~0L);
return false;
}
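/*
 * Illustrative use (editorial sketch): initialize the rcu_head right
 * after allocation, then later test whether it has been handed to
 * call_rcu(). The rh field and my_cb() are hypothetical.
 *
 * rcu_head_init(&p->rh);
 * ...
 * call_rcu(&p->rh, my_cb);
 * ...
 * if (rcu_head_after_call_rcu(&p->rh, my_cb))
 *         pr_debug("already queued\n");
 */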
/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;
#endif /* __LINUX_RCUPDATE_H */