#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait_bit.h>

/*
 * Per-bit wait address for inode->i_state: each state bit gets a distinct
 * address, which the var-waitqueue code hashes into the shared waitqueue
 * table.
 */
#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))

static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
{
	/* Caller is responsible for correct memory barriers. */
	wake_up_var(inode_state_wait_address(inode, bit));
}

static inline struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
							  struct inode *inode, u32 bit)
{
	void *bit_address;

	bit_address = inode_state_wait_address(inode, bit);
	init_wait_var_entry(wqe, bit_address, 0);
	return __var_waitqueue(bit_address);
}

/*
 * Lookup, allocating interior nodes from @_new_node (consumed on use)
 * if the tree must be extended:
 */
#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
	(__genradix_cast(_radix)					\
	 (__genradix_ptr_inlined(&(_radix)->tree,			\
			__genradix_idx_to_offset(_radix, _idx)) ?:	\
	  __genradix_ptr_alloc(&(_radix)->tree,				\
			__genradix_idx_to_offset(_radix, _idx),		\
			_new_node, _gfp)))

#define GENRADIX_NODE_SHIFT	9
#define GENRADIX_NODE_SIZE	(1U << GENRADIX_NODE_SHIFT)

#define GENRADIX_ARY		(GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)

/* Depth needed for a genradix that can index the full size_t range: */
#define GENRADIX_MAX_DEPTH						\
	DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)

#define GENRADIX_DEPTH_MASK						\
	((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))

struct genradix_root;

static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
	return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
}

static inline void genradix_free_node(struct genradix_node *node)
{
	kfree(node);
}

static inline int genradix_depth_shift(unsigned depth)
{
	return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}

/*
 * The root pointer packs the tree depth into its low bits, which are free
 * because nodes are GENRADIX_NODE_SIZE aligned:
 */
static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
{
	return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
}

static inline unsigned genradix_root_to_depth(struct genradix_root *r)
{
	return (unsigned long) r & GENRADIX_DEPTH_MASK;
}

struct genradix_node {
	union {
		/* Interior node: */
		struct genradix_node	*children[GENRADIX_ARY];

		/* Leaf: */
		u8			data[GENRADIX_NODE_SIZE];
	};
};

static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
{
	struct genradix_root *r = READ_ONCE(radix->root);
	struct genradix_node *n = genradix_root_to_node(r);
	unsigned level	= genradix_root_to_depth(r);
	unsigned shift	= genradix_depth_shift(level);

	/* Offset beyond what the current depth can address: */
	if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
		return NULL;

	/* Walk down interior nodes, consuming GENRADIX_ARY_SHIFT bits per level: */
	while (n && shift > GENRADIX_NODE_SHIFT) {
		shift -= GENRADIX_ARY_SHIFT;
		n = n->children[offset >> shift];
		offset &= (1UL << shift) - 1;
	}

	return n ? &n->data[offset] : NULL;
}

/* True for both realtime and deadline priorities (both below MAX_RT_PRIO): */
static inline bool rt_or_dl_prio(int prio)
{
	return unlikely(prio < MAX_RT_PRIO);
}

static inline bool rt_or_dl_task(struct task_struct *p)
{
	return rt_or_dl_prio(p->prio);
}

#ifdef _BCACHEFS_FS_H
inline void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}
#endif

#define __closure_wait_event_timeout(waitlist, _cond, _until)		\
({									\
	struct closure cl;						\
	long _t;							\
									\
	closure_init_stack(&cl);					\
									\
	while (1) {							\
		closure_wait(waitlist, &cl);				\
		if (_cond) {						\
			_t = max_t(long, 1L, _until - jiffies);		\
			break;						\
		}							\
		_t = max_t(long, 0L, _until - jiffies);			\
		if (!_t)						\
			break;						\
		closure_sync_timeout(&cl, _t);				\
	}								\
	closure_wake_up(waitlist);					\
	closure_sync(&cl);						\
	_t;								\
})

/*
 * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
 * condition became true
 */
#define closure_wait_event_timeout(waitlist, _cond, _timeout)		\
({									\
	unsigned long _until = jiffies + _timeout;			\
	(_cond)								\
		? max_t(long, 1L, _until - jiffies)			\
		: __closure_wait_event_timeout(waitlist, _cond, _until);\
})
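
/*
 * Sketch of the waiter side that pairs with inode_wake_up_bit() above.
 * example_wait_inode_bit() is a hypothetical helper, not kernel API; it
 * assumes the waker clears the bit (with appropriate barriers) before
 * calling inode_wake_up_bit(), and rechecks i_state under i_lock.
 */
static void example_wait_inode_bit(struct inode *inode, u32 bit)
{
	struct wait_bit_queue_entry wqe;
	struct wait_queue_head *wq_head = inode_bit_waitqueue(&wqe, inode, bit);

	for (;;) {
		prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);

		spin_lock(&inode->i_lock);
		if (!(inode->i_state & (1 << bit))) {
			spin_unlock(&inode->i_lock);
			break;
		}
		spin_unlock(&inode->i_lock);

		schedule();
	}
	finish_wait(wq_head, &wqe.wq_entry);
}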
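
/*
 * Usage sketch for the public genradix API built on the internals above
 * (GENRADIX(), genradix_init(), genradix_ptr_alloc(), genradix_ptr(),
 * genradix_free() from <linux/generic-radix-tree.h>); struct foo and the
 * index are made up for illustration.
 */
struct foo {
	u64	id;
};

static int example_genradix_use(void)
{
	GENRADIX(struct foo) foos;
	struct foo *f;

	genradix_init(&foos);

	/* Allocates interior nodes as needed; can fail under memory pressure: */
	f = genradix_ptr_alloc(&foos, 123, GFP_KERNEL);
	if (!f)
		return -ENOMEM;
	f->id = 42;

	/* Lookup without allocating returns NULL past the allocated range: */
	f = genradix_ptr(&foos, 123);

	genradix_free(&foos);
	return f ? 0 : -EINVAL;
}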
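
/*
 * Hypothetical caller of closure_wait_event_timeout() (struct example_dev
 * and its fields are made up): waits up to HZ jiffies for io_done to
 * become nonzero, returning false if the timeout expired.
 */
struct example_dev {
	struct closure_waitlist	io_wait;
	atomic_t		io_done;
};

static bool example_wait_io(struct example_dev *dev)
{
	long remaining;

	remaining = closure_wait_event_timeout(&dev->io_wait,
					       atomic_read(&dev->io_done),
					       HZ);
	return remaining != 0;	/* 0 means the timeout expired */
}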