#include "linux/compiler.h" #include "linux/printk.h" #include "linux/sched.h" #include "selinux/selinux.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "objsec.h" #include "allowlist.h" #include "arch.h" #include "feature.h" #include "klog.h" // IWYU pragma: keep #include "ksud.h" #include "sucompat.h" #include "core_hook.h" #include "sulog.h" #define SU_PATH "/system/bin/su" #define SH_PATH "/system/bin/sh" bool ksu_su_compat_enabled __read_mostly = true; extern void escape_to_root(); void ksu_sucompat_enable(); void ksu_sucompat_disable(); void ksu_mark_running_process(void) { struct task_struct *p, *t; read_lock(&tasklist_lock); for_each_process_thread (p, t) { if (!t->mm) { // only user processes continue; } int uid = task_uid(t).val; bool ksu_root_process = uid == 0 && is_task_ksu_domain(get_task_cred(t)); if (ksu_root_process || ksu_is_allow_uid(uid)) { ksu_set_task_tracepoint_flag(t); pr_info("sucompat: mark process: pid:%d, uid: %d, comm:%s\n", t->pid, uid, t->comm); } } read_unlock(&tasklist_lock); } static void handle_process_mark(bool mark) { struct task_struct *p, *t; read_lock(&tasklist_lock); for_each_process_thread(p, t) { if (mark) ksu_set_task_tracepoint_flag(t); else ksu_clear_task_tracepoint_flag(t); } read_unlock(&tasklist_lock); } static void mark_all_process(void) { handle_process_mark(true); pr_info("sucompat: mark all user process done!\n"); } static void unmark_all_process(void) { handle_process_mark(false); pr_info("sucompat: unmark all user process done!\n"); } static int su_compat_feature_get(u64 *value) { *value = ksu_su_compat_enabled ? 1 : 0; return 0; } static int su_compat_feature_set(u64 value) { bool enable = value != 0; if (enable == ksu_su_compat_enabled) { pr_info("su_compat: no need to change\n"); return 0; } if (enable) { ksu_sucompat_enable(); } else { ksu_sucompat_disable(); } ksu_su_compat_enabled = enable; pr_info("su_compat: set to %d\n", enable); return 0; } static const struct ksu_feature_handler su_compat_handler = { .feature_id = KSU_FEATURE_SU_COMPAT, .name = "su_compat", .get_handler = su_compat_feature_get, .set_handler = su_compat_feature_set, }; #ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK static bool ksu_sucompat_hook_state __read_mostly = true; #endif static void __user *userspace_stack_buffer(const void *d, size_t len) { /* To avoid having to mmap a page in userspace, just write below the stack * pointer. */ char __user *p = (void __user *)current_user_stack_pointer() - len; return copy_to_user(p, d, len) ? 
static char __user *sh_user_path(void)
{
	static const char sh_path[] = "/system/bin/sh";

	return userspace_stack_buffer(sh_path, sizeof(sh_path));
}

static char __user *ksud_user_path(void)
{
	static const char ksud_path[] = KSUD_PATH;

	return userspace_stack_buffer(ksud_path, sizeof(ksud_path));
}

int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
			 int *__unused_flags)
{
	const char su[] = SU_PATH;

#ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	if (!ksu_sucompat_hook_state) {
		return 0;
	}
#endif

	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}

	char path[sizeof(su) + 1];
	memset(path, 0, sizeof(path));
	strncpy_from_user_nofault(path, *filename_user, sizeof(path));

	if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
		ksu_sulog_report_syscall(current_uid().val, NULL, "faccessat", path);
#endif
		pr_info("faccessat su->sh!\n");
		*filename_user = sh_user_path();
	}

	return 0;
}

int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags)
{
	// const char sh[] = SH_PATH;
	const char su[] = SU_PATH;

#ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	if (!ksu_sucompat_hook_state) {
		return 0;
	}
#endif

	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}

	if (unlikely(!filename_user)) {
		return 0;
	}

	char path[sizeof(su) + 1];
	memset(path, 0, sizeof(path));

	// Remove this later: with the syscall hook in use, this branch can never be taken.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) && 0
	// it becomes a `struct filename *` after 5.18
	// https://elixir.bootlin.com/linux/v5.18/source/fs/stat.c#L216
	const char sh[] = SH_PATH;
	struct filename *filename = *((struct filename **)filename_user);

	if (IS_ERR(filename)) {
		return 0;
	}
	if (likely(memcmp(filename->name, su, sizeof(su))))
		return 0;

	pr_info("vfs_statx su->sh!\n");
	memcpy((void *)filename->name, sh, sizeof(sh));
#else
	strncpy_from_user_nofault(path, *filename_user, sizeof(path));
	if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
		ksu_sulog_report_syscall(current_uid().val, NULL, "newfstatat", path);
#endif
		pr_info("newfstatat su->sh!\n");
		*filename_user = sh_user_path();
	}
#endif

	return 0;
}

// The call from execve_handler_pre does not provide correct values for the
// __never_use_* arguments; only use them after execve_handler_pre is fixed.
// They are kept for consistency with manually patched code.
int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr,
				 void *__never_use_argv, void *__never_use_envp,
				 int *__never_use_flags)
{
	struct filename *filename;
	const char sh[] = KSUD_PATH;
	const char su[] = SU_PATH;

#ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	if (!ksu_sucompat_hook_state) {
		return 0;
	}
#endif

	if (unlikely(!filename_ptr))
		return 0;

	filename = *filename_ptr;
	if (IS_ERR(filename)) {
		return 0;
	}

	if (likely(memcmp(filename->name, su, sizeof(su))))
		return 0;

#if __SULOG_GATE
	bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val);
	ksu_sulog_report_syscall(current_uid().val, NULL, "execve", filename->name);
	if (!is_allowed) {
		return 0;
	}
	ksu_sulog_report_su_attempt(current_uid().val, NULL, filename->name, is_allowed);
#else
	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}
#endif

	pr_info("do_execveat_common su found\n");
	memcpy((void *)filename->name, sh, sizeof(sh));

	escape_to_root();

	return 0;
}

int ksu_handle_execveat(int *fd, struct filename **filename_ptr, void *argv,
			void *envp, int *flags)
{
	return ksu_handle_execveat_sucompat(fd, filename_ptr, argv, envp, flags);
}
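/*
 * execve() variant that takes the userspace filename pointer: when an
 * allowed UID executes /system/bin/su, the filename is redirected to the
 * ksud binary and the calling task is escalated to root.
 */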
int ksu_handle_execve_sucompat(int *fd, const char __user **filename_user,
			       void *__never_use_argv, void *__never_use_envp,
			       int *__never_use_flags)
{
	const char su[] = SU_PATH;
	char path[sizeof(su) + 1];

#ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	if (!ksu_sucompat_hook_state) {
		return 0;
	}
#endif

	if (unlikely(!filename_user))
		return 0;

	memset(path, 0, sizeof(path));
	strncpy_from_user_nofault(path, *filename_user, sizeof(path));

	if (likely(memcmp(path, su, sizeof(su))))
		return 0;

#if __SULOG_GATE
	bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val);
	ksu_sulog_report_syscall(current_uid().val, NULL, "execve", path);
	if (!is_allowed)
		return 0;
	ksu_sulog_report_su_attempt(current_uid().val, NULL, path, is_allowed);
#else
	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}
#endif

	pr_info("sys_execve su found\n");
	*filename_user = ksud_user_path();

	escape_to_root();

	return 0;
}

int __ksu_handle_devpts(struct inode *inode)
{
#ifndef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	if (!ksu_sucompat_hook_state)
		return 0;
#endif

	if (!current->mm) {
		return 0;
	}

	uid_t uid = current_uid().val;
	if (uid % 100000 < 10000) {
		// not untrusted_app, ignore it
		return 0;
	}

	if (likely(!ksu_is_allow_uid_for_current(uid)))
		return 0;

	struct inode_security_struct *sec = selinux_inode(inode);
	if (ksu_file_sid && sec)
		sec->sid = ksu_file_sid;

	return 0;
}

#ifdef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
// Tracepoint probe for sys_enter
static void sucompat_sys_enter_handler(void *data, struct pt_regs *regs, long id)
{
	// Handle newfstatat
	if (unlikely(id == __NR_newfstatat)) {
		int *dfd = (int *)&PT_REGS_PARM1(regs);
		const char __user **filename_user =
			(const char __user **)&PT_REGS_PARM2(regs);
		int *flags = (int *)&PT_REGS_SYSCALL_PARM4(regs);

		ksu_handle_stat(dfd, filename_user, flags);
		return;
	}

	// Handle faccessat
	if (unlikely(id == __NR_faccessat)) {
		int *dfd = (int *)&PT_REGS_PARM1(regs);
		const char __user **filename_user =
			(const char __user **)&PT_REGS_PARM2(regs);
		int *mode = (int *)&PT_REGS_PARM3(regs);

		ksu_handle_faccessat(dfd, filename_user, mode, NULL);
		return;
	}

	// Handle execve
	if (unlikely(id == __NR_execve)) {
		const char __user **filename_user =
			(const char __user **)&PT_REGS_PARM1(regs);
		// fd is unused by the handler; pass a real pointer to keep the
		// prototype type-correct
		int fd = AT_FDCWD;

		ksu_handle_execve_sucompat(&fd, filename_user, NULL, NULL, NULL);
		return;
	}
}
#endif // KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK

#ifdef CONFIG_KRETPROBES
static struct kretprobe *init_kretprobe(const char *name,
					kretprobe_handler_t handler)
{
	struct kretprobe *rp = kzalloc(sizeof(struct kretprobe), GFP_KERNEL);
	if (!rp)
		return NULL;

	rp->kp.symbol_name = name;
	rp->handler = handler;
	rp->data_size = 0;
	rp->maxactive = 0;

	int ret = register_kretprobe(rp);
	pr_info("sucompat: register_%s kretprobe: %d\n", name, ret);
	if (ret) {
		kfree(rp);
		return NULL;
	}

	return rp;
}

static void destroy_kretprobe(struct kretprobe **rp_ptr)
{
	struct kretprobe *rp = *rp_ptr;

	if (!rp)
		return;

	unregister_kretprobe(rp);
	synchronize_rcu();
	kfree(rp);
	*rp_ptr = NULL;
}
#endif

#ifdef CONFIG_KRETPROBES
static int tracepoint_reg_count = 0;
static DEFINE_SPINLOCK(tracepoint_reg_lock);

// Runs after syscall_regfunc(): keep the per-task tracepoint flags accurate
// depending on whether we are the only sys_enter user or share it with others.
static int syscall_regfunc_handler(struct kretprobe_instance *ri,
				   struct pt_regs *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&tracepoint_reg_lock, flags);
	if (tracepoint_reg_count < 1) {
		// while installing our tracepoint, mark only our processes
		unmark_all_process();
		ksu_mark_running_process();
	} else {
		// while installing another tracepoint, mark all processes
		mark_all_process();
	}
	tracepoint_reg_count++;
	spin_unlock_irqrestore(&tracepoint_reg_lock, flags);

	return 0;
}
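/*
 * Mirror of syscall_regfunc_handler: when the last sys_enter user is
 * removed, clear the tracepoint flag on every task; while other users
 * remain, keep only KernelSU-relevant tasks flagged.
 */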
static int syscall_unregfunc_handler(struct kretprobe_instance *ri,
				     struct pt_regs *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&tracepoint_reg_lock, flags);
	if (tracepoint_reg_count <= 1) {
		// while uninstalling our tracepoint, unmark all processes
		unmark_all_process();
	} else {
		// while uninstalling another tracepoint, re-mark only our processes
		unmark_all_process();
		ksu_mark_running_process();
	}
	tracepoint_reg_count--;
	spin_unlock_irqrestore(&tracepoint_reg_lock, flags);

	return 0;
}

static struct kretprobe *syscall_regfunc_rp = NULL;
static struct kretprobe *syscall_unregfunc_rp = NULL;
#endif

void ksu_sucompat_enable(void)
{
	pr_info("sucompat: ksu_sucompat_enable called\n");

#ifdef CONFIG_KRETPROBES
	// Register kretprobe for syscall_regfunc
	syscall_regfunc_rp = init_kretprobe("syscall_regfunc",
					    syscall_regfunc_handler);
	// Register kretprobe for syscall_unregfunc
	syscall_unregfunc_rp = init_kretprobe("syscall_unregfunc",
					      syscall_unregfunc_handler);
#endif

#ifdef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	int ret = register_trace_sys_enter(sucompat_sys_enter_handler, NULL);
#ifndef CONFIG_KRETPROBES
	unmark_all_process();
	ksu_mark_running_process();
#endif
	if (ret) {
		pr_err("sucompat: failed to register sys_enter tracepoint: %d\n", ret);
	} else {
		pr_info("sucompat: sys_enter tracepoint registered\n");
	}
#else
	ksu_sucompat_hook_state = true;
	pr_info("ksu_sucompat_init: hooks enabled: execve/execveat_su, faccessat, stat\n");
#endif
}

void ksu_sucompat_disable(void)
{
	pr_info("sucompat: ksu_sucompat_disable called\n");

#ifdef KSU_HAVE_SYSCALL_TRACEPOINTS_HOOK
	unregister_trace_sys_enter(sucompat_sys_enter_handler, NULL);
	tracepoint_synchronize_unregister();
	pr_info("sucompat: sys_enter tracepoint unregistered\n");
#else
	ksu_sucompat_hook_state = false;
	pr_info("ksu_sucompat_exit: hooks disabled: execve/execveat_su, faccessat, stat\n");
#endif

#ifdef CONFIG_KRETPROBES
	destroy_kretprobe(&syscall_regfunc_rp);
	destroy_kretprobe(&syscall_unregfunc_rp);
#endif
}

// sucompat: permitted processes can execute 'su' to gain root access.
void ksu_sucompat_init(void)
{
	if (ksu_register_feature_handler(&su_compat_handler)) {
		pr_err("Failed to register su_compat feature handler\n");
	}

	if (ksu_su_compat_enabled) {
		ksu_sucompat_enable();
	}
}

void ksu_sucompat_exit(void)
{
	if (ksu_su_compat_enabled) {
		ksu_sucompat_disable();
	}

	ksu_unregister_feature_handler(KSU_FEATURE_SU_COMPAT);
}