#include <asm/current.h>
#include <linux/cred.h>
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "allowlist.h"
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "sucompat.h"
#include "app_profile.h"
#include "sulog.h"

#define SU_PATH "/system/bin/su"
#define SH_PATH "/system/bin/sh"

bool ksu_su_compat_enabled __read_mostly = true;

static int su_compat_feature_get(u64 *value)
{
	*value = ksu_su_compat_enabled ? 1 : 0;
	return 0;
}

static int su_compat_feature_set(u64 value)
{
	bool enable = value != 0;

	ksu_su_compat_enabled = enable;
	pr_info("su_compat: set to %d\n", enable);
	return 0;
}

static const struct ksu_feature_handler su_compat_handler = {
	.feature_id = KSU_FEATURE_SU_COMPAT,
	.name = "su_compat",
	.get_handler = su_compat_feature_get,
	.set_handler = su_compat_feature_set,
};

static void __user *userspace_stack_buffer(const void *d, size_t len)
{
	// To avoid having to mmap a page in userspace, just write below the
	// stack pointer.
	char __user *p = (void __user *)current_user_stack_pointer() - len;

	return copy_to_user(p, d, len) ? NULL : p;
}

static char __user *sh_user_path(void)
{
	static const char sh_path[] = SH_PATH;

	return userspace_stack_buffer(sh_path, sizeof(sh_path));
}

static char __user *ksud_user_path(void)
{
	static const char ksud_path[] = KSUD_PATH;

	return userspace_stack_buffer(ksud_path, sizeof(ksud_path));
}

/*
 * Walk the page tables for @addr and set the Access Flag (young bit) on the
 * backing PTE, so that a following *_nofault copy does not bail out on an
 * Access Flag fault. Only implemented for arm64.
 */
static bool try_set_access_flag(unsigned long addr)
{
#ifdef CONFIG_ARM64
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	bool ret = false;

	if (!mm)
		return false;

	if (!mmap_read_trylock(mm))
		return false;

	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start)
		goto out_unlock;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out_unlock;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		goto out_unlock;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		goto out_unlock;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out_unlock;

	if (pmd_trans_huge(*pmd))
		goto out_unlock;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out_unlock;

	pte = *ptep;
	if (!pte_present(pte))
		goto out_pte_unlock;

	if (pte_young(pte)) {
		ret = true;
		goto out_pte_unlock;
	}

	ptep_set_access_flags(vma, addr, ptep, pte_mkyoung(pte), 0);
	pr_info("set AF for addr %lx\n", addr);
	ret = true;

out_pte_unlock:
	pte_unmap_unlock(ptep, ptl);
out_unlock:
	mmap_read_unlock(mm);
	return ret;
#else
	return false;
#endif
}

int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
			 int *__unused_flags)
{
	const char su[] = SU_PATH;

	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}

	char path[sizeof(su) + 1];
	memset(path, 0, sizeof(path));
	strncpy_from_user_nofault(path, *filename_user, sizeof(path));

	if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
		ksu_sulog_report_syscall(current_uid().val, NULL, "faccessat", path);
#endif
		pr_info("faccessat su->sh!\n");
		*filename_user = sh_user_path();
	}

	return 0;
}

int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags)
{
	// const char sh[] = SH_PATH;
	const char su[] = SU_PATH;

	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}

	if (unlikely(!filename_user)) {
		return 0;
	}

	char path[sizeof(su) + 1];
	memset(path, 0, sizeof(path));
	strncpy_from_user_nofault(path, *filename_user, sizeof(path));

	if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
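		/* Record the intercepted stat on "su" when su-call logging is compiled in. */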
		ksu_sulog_report_syscall(current_uid().val, NULL, "newfstatat", path);
#endif
		pr_info("newfstatat su->sh!\n");
		*filename_user = sh_user_path();
	}

	return 0;
}

int ksu_handle_execve_sucompat(const char __user **filename_user, void *__never_use_argv,
			       void *__never_use_envp, int *__never_use_flags)
{
	const char su[] = SU_PATH;
	const char __user *fn;
	char path[sizeof(su) + 1] = { 0 };
	long ret;
	unsigned long addr;

	if (unlikely(!filename_user))
		return 0;

#if __SULOG_GATE
	bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val);

	// path has not been read from userspace at this point; it is reported
	// as an empty string.
	ksu_sulog_report_syscall(current_uid().val, NULL, "execve", path);
	if (!is_allowed)
		return 0;
	ksu_sulog_report_su_attempt(current_uid().val, NULL, path, is_allowed);
#else
	if (!ksu_is_allow_uid_for_current(current_uid().val)) {
		return 0;
	}
#endif

	addr = untagged_addr((unsigned long)*filename_user);
	fn = (const char __user *)addr;

	memset(path, 0, sizeof(path));
	ret = strncpy_from_user_nofault(path, fn, sizeof(path));
	if (ret < 0 && try_set_access_flag(addr)) {
		ret = strncpy_from_user_nofault(path, fn, sizeof(path));
	}
	if (ret < 0 && preempt_count()) {
		/* This is crazy, but we know what we are doing:
		 * temporarily exit atomic context to handle page faults, then
		 * restore it.
		 */
		pr_info("Access filename failed, try rescue..\n");
		preempt_enable_no_resched_notrace();
		ret = strncpy_from_user(path, fn, sizeof(path));
		preempt_disable_notrace();
	}
	if (ret < 0) {
		pr_warn("Access filename when execve failed: %ld\n", ret);
		return 0;
	}

	if (likely(memcmp(path, su, sizeof(su))))
		return 0;

	pr_info("sys_execve su found\n");
	*filename_user = ksud_user_path();
	escape_with_root_profile();

	return 0;
}

// sucompat: permitted process can execute 'su' to gain root access.
void ksu_sucompat_init(void)
{
	if (ksu_register_feature_handler(&su_compat_handler)) {
		pr_err("Failed to register su_compat feature handler\n");
	}
}

void ksu_sucompat_exit(void)
{
	ksu_unregister_feature_handler(KSU_FEATURE_SU_COMPAT);
}
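/*
 * Illustrative sketch (not part of this file's logic): with su compat enabled
 * and the calling UID on the allowlist, an ordinary execve of /system/bin/su
 * is all userspace has to do; ksu_handle_execve_sucompat() rewrites the
 * filename to KSUD_PATH before the exec runs and escapes the caller to the
 * root profile. The argv/envp values below are only assumed for the example.
 *
 *	// userspace sketch; assumes the calling UID is allowlisted
 *	char *argv[] = { "/system/bin/su", NULL };
 *	execve("/system/bin/su", argv, NULL);  // kernel redirects the path to ksud
 */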