#include <linux/version.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/sched/task.h>
#else
#include <linux/sched.h>
#endif
#include <linux/uaccess.h>
#include <linux/seccomp.h>
#include <linux/errno.h>
#include "klog.h" // IWYU pragma: keep
#include "kernel_compat.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
	defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
#include <linux/key.h>
#include <linux/errno.h>
#include <linux/cred.h>

extern int install_session_keyring_to_cred(struct cred *, struct key *);

struct key *init_session_keyring = NULL;

static inline int install_session_keyring(struct key *keyring)
{
	struct cred *new;
	int ret;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	ret = install_session_keyring_to_cred(new, keyring);
	if (ret < 0) {
		abort_creds(new);
		return ret;
	}

	return commit_creds(new);
}
#endif

struct file *ksu_filp_open_compat(const char *filename, int flags,
				  umode_t mode)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
	defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
	// On older kernels, workqueue workers have no session keyring;
	// borrow init's keyring before opening the file.
	if (init_session_keyring != NULL && !current_cred()->session_keyring &&
	    (current->flags & PF_WQ_WORKER)) {
		pr_info("installing init session keyring for older kernel\n");
		install_session_keyring(init_session_keyring);
	}
#endif
	return filp_open(filename, flags, mode);
}

ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count,
			       loff_t *pos)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \
	defined(KSU_OPTIONAL_KERNEL_READ)
	return kernel_read(p, buf, count, pos);
#else
	// Pre-4.14 kernel_read() takes the offset by value and does not
	// advance *pos, so emulate the newer calling convention here.
	loff_t offset = pos ? *pos : 0;
	ssize_t result = kernel_read(p, offset, (char *)buf, count);
	if (pos && result > 0) {
		*pos = offset + result;
	}
	return result;
#endif
}

ssize_t ksu_kernel_write_compat(struct file *p, const void *buf, size_t count,
				loff_t *pos)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \
	defined(KSU_OPTIONAL_KERNEL_WRITE)
	return kernel_write(p, buf, count, pos);
#else
	loff_t offset = pos ? *pos : 0;
	ssize_t result = kernel_write(p, buf, count, offset);
	if (pos && result > 0) {
		*pos = offset + result;
	}
	return result;
#endif
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) || \
	defined(KSU_OPTIONAL_STRNCPY)
long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
				   long count)
{
	return strncpy_from_user_nofault(dst, unsafe_addr, count);
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
				   long count)
{
	return strncpy_from_unsafe_user(dst, unsafe_addr, count);
}
#else
// Copied from: https://elixir.bootlin.com/linux/v4.9.337/source/mm/maccess.c#L201
long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
				   long count)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	if (unlikely(count <= 0))
		return 0;

	set_fs(USER_DS);
	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();
	set_fs(old_fs);

	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		ret++;
	}

	return ret;
}
#endif

long ksu_strncpy_from_user_retry(char *dst, const void __user *unsafe_addr,
				 long count)
{
	long ret;

	ret = ksu_strncpy_from_user_nofault(dst, unsafe_addr, count);
	if (likely(ret >= 0))
		return ret;

	// we faulted! fallback to slow path
	if (unlikely(!ksu_access_ok(unsafe_addr, count))) {
#ifdef CONFIG_KSU_DEBUG
		pr_err("%s: faulted!\n", __func__);
#endif
		return -EFAULT;
	}

	// why don't we do it like strncpy_from_user_nofault()? This is the
	// slow path: access_ok passed, so let the copy take page faults.
	ret = strncpy_from_user(dst, unsafe_addr, count);
	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (likely(ret >= 0)) {
		ret++;
	}

	return ret;
}

long ksu_copy_from_user_nofault(void *dst, const void __user *src, size_t size)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
	return copy_from_user_nofault(dst, src, size);
#else
	// https://elixir.bootlin.com/linux/v5.8/source/mm/maccess.c#L205
	long ret = -EFAULT;
	mm_segment_t old_fs = get_fs();

	set_fs(USER_DS);
	// tweaked to use ksu_access_ok
	if (ksu_access_ok(src, size)) {
		pagefault_disable();
		ret = __copy_from_user_inatomic(dst, src, size);
		pagefault_enable();
	}
	set_fs(old_fs);

	if (ret)
		return -EFAULT;
	return 0;
#endif
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
// Local mirrors of the private definitions in kernel/seccomp.c, needed to
// reach the per-filter action cache from this file.
struct action_cache {
	DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
#ifdef SECCOMP_ARCH_COMPAT
	DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
#endif
};

struct seccomp_filter {
	refcount_t refs;
	refcount_t users;
	bool log;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
	bool wait_killable_recv;
#endif
	struct action_cache cache;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
	struct notification *notif;
	struct mutex notify_lock;
	wait_queue_head_t wqh;
};

// Drop syscall nr from the filter's allow cache so the BPF program is
// evaluated again for it.
void ksu_seccomp_clear_cache(struct seccomp_filter *filter, int nr)
{
	if (!filter) {
		return;
	}

	if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
		clear_bit(nr, filter->cache.allow_native);
	}

#ifdef SECCOMP_ARCH_COMPAT
	if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
		clear_bit(nr, filter->cache.allow_compat);
	}
#endif
}

// Mark syscall nr as always-allowed in the filter's action cache.
void ksu_seccomp_allow_cache(struct seccomp_filter *filter, int nr)
{
	if (!filter) {
		return;
	}

	if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
		set_bit(nr, filter->cache.allow_native);
	}

#ifdef SECCOMP_ARCH_COMPAT
	if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
		set_bit(nr, filter->cache.allow_compat);
	}
#endif
}
#endif
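
/*
 * Usage sketch: one way the compat wrappers above could be used to read a
 * small file from kernel context. The path "/proc/version" and the 64-byte
 * buffer are arbitrary illustrations, and the block is kept under #if 0 so
 * it is never compiled.
 */
#if 0
static void ksu_compat_read_example(void)
{
	struct file *fp;
	char buf[64] = { 0 };
	loff_t pos = 0;
	ssize_t len;

	// Works on pre-4.10 kernels too; the wrapper installs the init
	// session keyring first when needed.
	fp = ksu_filp_open_compat("/proc/version", O_RDONLY, 0);
	if (IS_ERR(fp))
		return;

	// Reads into buf and advances pos regardless of the kernel_read() ABI.
	len = ksu_kernel_read_compat(fp, buf, sizeof(buf) - 1, &pos);
	if (len > 0)
		pr_info("read %zd bytes: %s\n", len, buf);

	filp_close(fp, NULL);
}
#endif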