22 Commits

Author SHA1 Message Date
ShirkNeko
7b6074cfc3 kernel: Fix a NEON/FPSIMD register state error hit while decrypting filenames when iterate_dir() is called on an encrypted directory (F2FS + file-based encryption).
Error log:
[ T4681] Call trace:
[ T4681]  fpsimd_save_state+0x4/0x58
[ T4681]  cts_cbc_decrypt+0x268/0x384
[ T4681]  fscrypt_fname_disk_to_usr+0x1dc/0x338
[ T4681]  f2fs_fill_dentries+0x1cc/0x330
[ T4681]  f2fs_readdir+0x1a0/0x3ec
[ T4681]  iterate_dir+0x80/0x170
[ T4681]  scan_user_data_for_uids+0x170/0x560
[ T4681]  throne_tracker_thread+0x68/0x290
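
For context, a minimal sketch of the call shape in the trace above: a dir_context actor driven by iterate_dir(), during which fscrypt decrypts on-disk filenames with NEON-accelerated ciphers, so the walk has to run in plain process context where kernel-mode FPSIMD is usable. This is illustrative only (not the commit's actual fix), assumes a >= 6.1 kernel where the filldir actor returns bool, and uses hypothetical helper names.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/err.h>

struct uid_scan_ctx {
        struct dir_context ctx;         /* must be first for container_of() */
        int count;
};

static bool uid_scan_actor(struct dir_context *ctx, const char *name,
                           int namelen, loff_t off, u64 ino, unsigned int d_type)
{
        struct uid_scan_ctx *sctx = container_of(ctx, struct uid_scan_ctx, ctx);

        if (d_type == DT_DIR)
                sctx->count++;          /* e.g. record per-app data dirs here */
        return true;                    /* keep iterating */
}

static int scan_dir(const char *path)   /* hypothetical helper name */
{
        struct uid_scan_ctx sctx = { .ctx.actor = uid_scan_actor };
        struct file *filp = filp_open(path, O_RDONLY | O_DIRECTORY, 0);

        if (IS_ERR(filp))
                return PTR_ERR(filp);
        iterate_dir(filp, &sctx.ctx);   /* fscrypt decrypts names in the actor path */
        filp_close(filp, NULL);
        return sctx.count;
}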
2025-09-16 22:36:26 +08:00
ShirkNeko
4e8d699654 sporadic deadlock fix
Move to an always-kthreaded model to mitigate sporadic deadlocks.

Co-authored-by: backslashxx <118538522+backslashxx@users.noreply.github.com>
2025-09-16 19:36:47 +08:00
ShirkNeko
60d122c01b kernel: Add support for concurrent scanning of user data apps 2025-09-16 18:16:19 +08:00
ShirkNeko
335ddc4432 kernel: Enhanced user data scanning
Added filesystem type checks to prevent dangerous paths
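
A minimal sketch of what such a filesystem-type guard can look like, assuming the scanner only wants to descend into real data filesystems (ext4/f2fs) and should skip anything else (procfs, sysfs, bind-mounted tmpfs, ...). The helper name and the exact whitelist are illustrative, not taken from the commit.

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/magic.h>

#ifndef F2FS_SUPER_MAGIC
#define F2FS_SUPER_MAGIC 0xF2F52010     /* fallback for headers lacking it */
#endif

static bool is_safe_data_fs(const char *path)
{
        struct path p;
        unsigned long magic;

        if (kern_path(path, LOOKUP_FOLLOW, &p))
                return false;
        magic = p.dentry->d_sb->s_magic;
        path_put(&p);
        return magic == EXT4_SUPER_MAGIC || magic == F2FS_SUPER_MAGIC;
}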
2025-09-16 17:27:00 +08:00
ShirkNeko
765106c56a kernel: Separate and modularize the user data scanner's scan function 2025-09-16 17:04:49 +08:00
ShirkNeko
b685f03a6e kernel: Separate kern_path() and iterate_dir() operations to avoid lock contention. 2025-09-16 15:45:29 +08:00
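
A rough sketch of that split, under the assumption that the problem was keeping the kern_path() lookup result alive across the whole directory walk. Here the path is resolved and validated first, converted into a struct file, and the lookup reference is dropped before the potentially long iterate_dir() pass; names are illustrative.

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/cred.h>
#include <linux/err.h>

static int open_and_iterate(const char *dirpath, struct dir_context *ctx)
{
        struct path p;
        struct file *filp;
        int err;

        err = kern_path(dirpath, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &p);
        if (err)
                return err;

        filp = dentry_open(&p, O_RDONLY | O_DIRECTORY, current_cred());
        path_put(&p);                   /* drop the lookup reference early */
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        err = iterate_dir(filp, ctx);   /* long walk with no path reference held */
        filp_close(filp, NULL);
        return err;
}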
ShirkNeko
fae301c161 kernel: Remove duplicate #include <linux/list.h> 2025-09-16 15:09:13 +08:00
ShirkNeko
73cd1f2cf3 kernel: Optimizing thread scheduling during user scans
`[   23.379244][ T5074] ufshcd-qcom 1d84000.ufshc: ............. ufs dump complete ..........
[   23.379263][ T5074] dump-reason-buffer-size: 256
[   23.379267][ T5074] dump-reason-pidbuffer:PID: 5074, Process Name: throne_tracker
[   23.379295][ T5074] qcom_q6v5_pas a3380000.remoteproc-soccp: waking SOCCP from panic path
[   23.379455][ T5074] CPU0 next event is 23368000000
[   23.379456][ T5074] CP.rkp_only' to 'true' in property file '/odm/build.prop': SELinux permission check failed
[    1.248057][    T1] init: Do not have permissions to set 'ro.oplus.nfc.support.tee' to 'true' in pro   23.379459][ T5074] CPU5 next event is 23368000000
[   23.3794   1.248059][    T1] init: Do not have permissions to set 'ro.oplus.eid.enable.state' to '1' in property file '/odm/build.prop':l-3d0: snapshot: device is powered off
[   23.589323][ T5074] Kernel Offset: 0x1738a00000 from 0xffffffc080000000
[   23.589325][ T5074] PHYS_OFFSET: 0x80000000
[   23.589326][ T5074] CPU features: 0x000000,00000000,70024f43,95fffea7
[   23.589328][ T5074] Memory Limit: none
[   23.589490][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] host pm_state:M2 dev_state:M2 ee:MISSION MODE
[   23.589505][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] device ee: MISSION MODE dev_state: M2`

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-16 15:00:05 +08:00
ShirkNeko
eb5d8fa770 kernel: Use a cached buffer as an array-based stack to avoid panics caused by overly deep traversal.
INTCAM: no information
       TPU: no information
       TNR: no information
       MFC: no information
        BO: no information
[   4.715484] [I] [DSS] Last AVB: avb_ret=ERROR_VERIFICATION
[   4.715890] [I] [DSS] Last AVB: avb_veritymode=enforcing
[   4.716289] [I] [DSS] Last AVB: avb_error_parts=boot
[   4.717085] [I] [LNXDBG] build info set by kernel
RAMDUMP_MSG.txt:
  reset message: KP: kernel stack overflow
  UUID: e2faff80-83ea-c240-ac75-d7b8a528c892
  last kernel version: 6.1.134-android14-11-g23e556daebf3-ab13800907
  aosp kernel version: 6.1.145-android14-11-g8d713f9e8e7b-ab13202960
  build: google/shiba/shiba:16/BP3A.250905.014/13873947:user/release-keys
  RST_STAT: 0x1 - CLUSTER0_NONCPU_WDTRESET
  GSA_RESET_STATUS: 0x0 -
  Reboot reason: 0xbaba - Kernel PANIC
  Reboot mode: 0x0 - Normal Boot
[   4.719030] [I] [DSS] -------------------- DSS LOGS END --------------------

Reboot Info:
  RST_STAT: 0x180000 - PIN_RESET | PO_RESET
  GSA_RESET_STATUS: 0x0 -
  Reboot reason: 0xbaba - Kernel PANIC
  Reboot mode: 0x0 - Normal Boot
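
The ramdump above shows the "KP: kernel stack overflow" this commit guards against. A minimal sketch of the idea (the real implementation and its cached-buffer management are not reproduced here; sizes and helper names are illustrative) replaces recursive descent with an explicit, bounded, pre-allocated stack so a deep directory tree cannot exhaust the 16 KiB kernel stack.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define DATA_PATH_LEN    384            /* illustrative */
#define SCAN_STACK_DEPTH 64             /* illustrative bound */

struct scan_stack {
        char (*paths)[DATA_PATH_LEN];   /* one cached allocation, reused per scan */
        int top;
};

static int scan_stack_init(struct scan_stack *s)
{
        s->paths = kcalloc(SCAN_STACK_DEPTH, DATA_PATH_LEN, GFP_KERNEL);
        s->top = 0;
        return s->paths ? 0 : -ENOMEM;
}

static int scan_push(struct scan_stack *s, const char *path)
{
        if (s->top >= SCAN_STACK_DEPTH)
                return -ENOSPC;         /* refuse instead of overflowing */
        strscpy(s->paths[s->top++], path, DATA_PATH_LEN);
        return 0;
}

static bool scan_pop(struct scan_stack *s, char *out)
{
        if (!s->top)
                return false;
        strscpy(out, s->paths[--s->top], DATA_PATH_LEN);
        return true;
}

/* Typical driver loop: push the root, then pop and iterate_dir() each entry,
 * pushing any subdirectories the actor reports instead of recursing into them. */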

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-16 00:53:48 +08:00
ShirkNeko
a197600cb5 kernel: Add optional full-user scanning capability using prctl 2025-09-15 19:14:55 +08:00
ShirkNeko
39c1b45257 Sync with latest official KernelSU commit 4d3560b12bec5f238fe11f908a246f0ac97e9c27
Co-authored-by: simonpunk <simonpunk2016@gmail.com>
2025-09-15 15:54:46 +08:00
Wang Han
4be4758334 Unmount isolated process which forks from zygote unconditionally (#2747)
Rethinking this patch: an isolated process that forks directly from
zygote is just like a normal app, so there is no reason that normal apps
would be unaffected while isolated processes crash. Also, zygote reopens
fds before it actually forks, so this should be fine.

This reverts commit 2a1741de96a789957555053cf5a397cbef1eb3e4.
2025-09-15 15:19:00 +08:00
ShirkNeko
6892a23c6a kernel: Fixed an issue where scanning could cause the application to freeze. 2025-09-14 21:49:26 +08:00
ShirkNeko
f8abf097d7 kernel: Improve dynamic manager functions and logging 2025-09-14 19:31:21 +08:00
ShirkNeko
fb2ad3ec7b kernel: keep legacy throne tracker as an option

This change restores the older throne tracker that uses packages.list scanning
to track app UIDs. It's intended for ultra-legacy Linux 3.X kernels that
experience deadlocks or crashes with the newer implementation due to issues
in user_data_actor().

We have to remember that the whole iterate_dir and filldir subsystem is very
different on 3.X.

Changes:
- CONFIG_KSU_THRONE_TRACKER_LEGACY in Kconfig
- conditional compilation in Makefile
- throne_tracker_legacy.c which keeps the old implementation

Enable this option if the newer throne tracker (tiann #2757) crashes on you.

Co-authored-by: backslashxx <118538522+backslashxx@users.noreply.github.com>
2025-09-14 17:38:25 +08:00
ShirkNeko
debd7d5a01 kernel: throne_tracker: offload to kthread (tiann[#2632](https://github.com/SukiSU-Ultra/SukiSU-Ultra/issues/2632))
Run throne_tracker() in a kthread instead of blocking the caller.
Prevents a full lockup while installing or removing the manager.

The first run remains synchronous for compatibility purposes (FDE, FBEv1, FBEv2).

Features:
- run track_throne() in a kthread after the first synchronous run
- prevent duplicate thread creation with a single-instance check
- spinlock-on-d_lock based polling addressing possible race conditions.

Race conditions addressed:
- single instance kthread lock, smp_mb()
- is_manager_apk, apk, spinlock-on-d_lock based polling
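
A minimal sketch of the single-instance kthread launch described above, using an acquire/release atomic flag in place of the raw smp_mb() pairing mentioned in this log. track_throne() is the function named above; everything else (flag and helper names) is illustrative.

#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/printk.h>

extern void track_throne(void);         /* the scan named in this commit message */

static atomic_t tracker_running = ATOMIC_INIT(0);

static int throne_tracker_fn(void *data)
{
        track_throne();                                 /* the actual scan */
        atomic_set_release(&tracker_running, 0);        /* allow the next run */
        return 0;
}

static void schedule_throne_tracker(void)
{
        struct task_struct *tsk;

        if (atomic_cmpxchg_acquire(&tracker_running, 0, 1) != 0)
                return;                                 /* a scan is already in flight */

        tsk = kthread_run(throne_tracker_fn, NULL, "throne_tracker");
        if (IS_ERR(tsk)) {
                atomic_set(&tracker_running, 0);
                pr_err("throne_tracker: kthread_run failed\n");
        }
}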

This is a squash of:
https://github.com/tiann/KernelSU/pull/2632

Rebased on top of
https://github.com/tiann/KernelSU/pull/2757

Original skeleton based on:
`kernelsu: move throne_tracker() to kthread`
`kernelsu: check locking before accessing files and dirs during searching manager`
`kernelsu: look for manager UID in /data/system/packages.list, not /data/system/packages.list.tmp`
0b05e927...8783badd

Co-Authored-By: backslashxx <118538522+backslashxx@users.noreply.github.com>
Co-Authored-By: Yaroslav Zviezda <10716792+acroreiser@users.noreply.github.com>
Signed-off-by: backslashxx <118538522+backslashxx@users.noreply.github.com>
2025-09-14 11:53:33 +08:00
ShirkNeko
4c3bdcd016 kernel: Switch to using pr_info for printing 2025-09-14 11:25:59 +08:00
ShirkNeko
c5a2e06b94 kernel: Simplify and improve readability 2025-09-14 10:22:40 +08:00
ShirkNeko
307bb67856 Add vfs_getattr compatibility for kernels < 4.14
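The commit title is all the log gives; a hedged sketch of the kind of compat wrapper it implies, keyed off the KSU_HAS_NEW_VFS_GETATTR define that the Makefile diff below probes for. Newer kernels take (path, stat, request_mask, query_flags), older ones only (path, stat); the wrapper name is illustrative.

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/path.h>

static inline int ksu_vfs_getattr(const struct path *path, struct kstat *stat)
{
#ifdef KSU_HAS_NEW_VFS_GETATTR
        /* statx-era signature detected by the Makefile grep on fs/stat.c */
        return vfs_getattr(path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
#else
        /* pre-statx two-argument signature */
        return vfs_getattr((struct path *)path, stat);
#endif
}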
Co-authored-by: backslashxx <118538522+backslashxx@users.noreply.github.com>
Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-14 10:00:14 +08:00
ShirkNeko
63d9bdd9d6 kernel: Use iterate_dir for multi-user traversal instead of a fixed user ID range. 2025-09-13 21:24:40 +08:00
ShirkNeko
eb87c1355b Fixed some minor issues that may have existed 2025-09-13 20:26:44 +08:00
ShirkNeko
316cb79f32 kernel: Remove fallback scan for packages.list
Enhance scan support for active users in /data/user_de

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-12 16:02:51 +08:00
72 changed files with 5643 additions and 7925 deletions

View File

@@ -4,38 +4,48 @@ config KSU
tristate "KernelSU function support" tristate "KernelSU function support"
default y default y
help help
Enable kernel-level root privileges on Android System. Enable kernel-level root privileges on Android System.
To compile as a module, choose M here: the To compile as a module, choose M here: the
module will be called kernelsu. module will be called kernelsu.
config KSU_THRONE_TRACKER_LEGACY
bool "Use legacy throne tracker (packages.list scanning)"
depends on KSU
default n
help
Use legacy throne tracker that scans packages.list for app UIDs.
This is kept for Ultra-Legacy Linux 3.X kernels which are prone to deadlocks.
Enable this if default scanning deadlocks/crashes on you.
config KSU_DEBUG config KSU_DEBUG
bool "KernelSU debug mode" bool "KernelSU debug mode"
depends on KSU depends on KSU
default n default n
help help
Enable KernelSU debug mode. Enable KernelSU debug mode.
config KSU_ALLOWLIST_WORKAROUND
bool "KernelSU Session Keyring Init workaround"
depends on KSU
default n
help
Enable session keyring init workaround for problematic devices.
Useful for situations where the SU allowlist is not kept after a reboot
config KSU_MANUAL_SU
bool "Use manual su"
depends on KSU
default y
help
Use manual su and authorize the corresponding command line and application via prctl
config KSU_MULTI_MANAGER_SUPPORT config KSU_MULTI_MANAGER_SUPPORT
bool "Multi KernelSU manager support" bool "Multi KernelSU manager support"
depends on KSU depends on KSU
default n default n
help help
Enable multi KernelSU manager support Enable multi KernelSU manager support
config KSU_ALLOWLIST_WORKAROUND
bool "KernelSU Session Keyring Init workaround"
depends on KSU
default n
help
Enable session keyring init workaround for problematic devices.
Useful for situations where the SU allowlist is not kept after a reboot
config KSU_CMDLINE
bool "Enable KernelSU cmdline"
depends on KSU && KSU != m
default n
help
Enable a cmdline called kernelsu.enabled
Value 1 means enabled, value 0 means disabled.
config KPM config KPM
bool "Enable SukiSU KPM" bool "Enable SukiSU KPM"
@@ -44,105 +54,169 @@ config KPM
select KALLSYMS_ALL select KALLSYMS_ALL
default n default n
help help
Enabling this option will activate the KPM feature of SukiSU. Enabling this option will activate the KPM feature of SukiSU.
This option is suitable for scenarios where you need to force KPM to be enabled. This option is suitable for scenarios where you need to force KPM to be enabled.
but it may affect system stability. but it may affect system stability.
choice
prompt "KernelSU hook type"
depends on KSU
default KSU_KPROBES_HOOK
help
Hook type for KernelSU
config KSU_KPROBES_HOOK
bool "Hook KernelSU with Kprobes"
depends on KPROBES
help
If enabled, Hook required KernelSU syscalls with Kernel-probe.
config KSU_TRACEPOINT_HOOK
bool "Hook KernelSU with Tracepoint"
depends on TRACEPOINTS
help
If enabled, Hook required KernelSU syscalls with Tracepoint.
config KSU_MANUAL_HOOK config KSU_MANUAL_HOOK
bool "Hook KernelSU manually" bool "Hook KernelSU manually"
depends on KSU != m && !KSU_SUSFS depends on KSU != m
help help
If enabled, Hook required KernelSU syscalls with manually-patched function. If enabled, Hook required KernelSU syscalls with manually-patched function.
menu "KernelSU - SUSFS" endchoice
menu "KernelSU - SUSFS"
config KSU_SUSFS config KSU_SUSFS
bool "KernelSU addon - SUSFS" bool "KernelSU addon - SUSFS"
depends on KSU depends on KSU
depends on THREAD_INFO_IN_TASK depends on THREAD_INFO_IN_TASK
default y default y
help help
Patch and Enable SUSFS to kernel with KernelSU. Patch and Enable SUSFS to kernel with KernelSU.
config KSU_SUSFS_HAS_MAGIC_MOUNT
bool "Say yes if the current KernelSU repo has magic mount implemented (default y)"
depends on KSU_SUSFS
default y
help
- Enable to indicate that the current SUSFS kernel supports the auto hide features for 5ec1cff's Magic Mount KernelSU
- Every mounts from /debug_ramdisk/workdir will be treated as magic mount and processed differently by susfs
config KSU_SUSFS_SUS_PATH config KSU_SUSFS_SUS_PATH
bool "Enable to hide suspicious path (NOT recommended)" bool "Enable to hide suspicious path (NOT recommended)"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow hiding the user-defined path and all its sub-paths from various system calls. - Allow hiding the user-defined path and all its sub-paths from various system calls.
- Includes temp fix for the leaks of app path in /sdcard/Android/data directory. - tmpfs filesystem is not allowed to be added.
- Effective only on zygote spawned user app process. - Effective only on zygote spawned user app process.
- Use with cautious as it may cause performance loss and will be vulnerable to side channel attacks, - Use with cautious as it may cause performance loss and will be vulnerable to side channel attacks,
just disable this feature if it doesn't work for you or you don't need it at all. just disable this feature if it doesn't work for you or you don't need it at all.
config KSU_SUSFS_SUS_MOUNT config KSU_SUSFS_SUS_MOUNT
bool "Enable to hide suspicious mounts" bool "Enable to hide suspicious mounts"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow hiding the user-defined mount paths from /proc/self/[mounts|mountinfo|mountstat]. - Allow hiding the user-defined mount paths from /proc/self/[mounts|mountinfo|mountstat].
- Effective on all processes for hiding mount entries. - Effective on all processes for hiding mount entries.
- Mounts mounted by process with ksu domain will be forced to be assigned the dev name "KSU". - Mounts mounted by process with ksu domain will be forced to be assigned the dev name "KSU".
- mnt_id and mnt_group_id of the sus mount will be assigned to a much bigger number to solve the issue of id not being contiguous. - mnt_id and mnt_group_id of the sus mount will be assigned to a much bigger number to solve the issue of id not being contiguous.
config KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT
bool "Enable to hide KSU's default mounts automatically (experimental)"
depends on KSU_SUSFS_SUS_MOUNT
default y
help
- Automatically add KSU's default mounts to sus_mount.
- No susfs command is needed in userspace.
- Only mount operation from process with ksu domain will be checked.
config KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT
bool "Enable to hide suspicious bind mounts automatically (experimental)"
depends on KSU_SUSFS_SUS_MOUNT
default y
help
- Automatically add binded mounts to sus_mount.
- No susfs command is needed in userspace.
- Only mount operation from process with ksu domain will be checked.
config KSU_SUSFS_SUS_KSTAT config KSU_SUSFS_SUS_KSTAT
bool "Enable to spoof suspicious kstat" bool "Enable to spoof suspicious kstat"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow spoofing the kstat of user-defined file/directory. - Allow spoofing the kstat of user-defined file/directory.
- Effective only on zygote spawned user app process. - Effective only on zygote spawned user app process.
config KSU_SUSFS_TRY_UMOUNT
bool "Enable to use ksu's try_umount"
depends on KSU_SUSFS
default y
help
- Allow using try_umount to umount other user-defined mount paths prior to ksu's default umount paths.
- Effective on all NO-root-access-granted processes.
config KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT
bool "Enable to add bind mounts to ksu's try_umount automatically (experimental)"
depends on KSU_SUSFS_TRY_UMOUNT
default y
help
- Automatically add binded mounts to ksu's try_umount.
- No susfs command is needed in userspace.
- Only mount operation from process with ksu domain will be checked.
config KSU_SUSFS_SPOOF_UNAME config KSU_SUSFS_SPOOF_UNAME
bool "Enable to spoof uname" bool "Enable to spoof uname"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow spoofing the string returned by uname syscall to user-defined string. - Allow spoofing the string returned by uname syscall to user-defined string.
- Effective on all processes. - Effective on all processes.
config KSU_SUSFS_ENABLE_LOG config KSU_SUSFS_ENABLE_LOG
bool "Enable logging susfs log to kernel" bool "Enable logging susfs log to kernel"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow logging susfs log to kernel, uncheck it to completely disable all susfs log. - Allow logging susfs log to kernel, uncheck it to completely disable all susfs log.
config KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS config KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS
bool "Enable to automatically hide ksu and susfs symbols from /proc/kallsyms" bool "Enable to automatically hide ksu and susfs symbols from /proc/kallsyms"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Automatically hide ksu and susfs symbols from '/proc/kallsyms'. - Automatically hide ksu and susfs symbols from '/proc/kallsyms'.
- Effective on all processes. - Effective on all processes.
config KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG config KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG
bool "Enable to spoof /proc/bootconfig (gki) or /proc/cmdline (non-gki)" bool "Enable to spoof /proc/bootconfig (gki) or /proc/cmdline (non-gki)"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Spoof the output of /proc/bootconfig (gki) or /proc/cmdline (non-gki) with a user-defined file. - Spoof the output of /proc/bootconfig (gki) or /proc/cmdline (non-gki) with a user-defined file.
- Effective on all processes. - Effective on all processes.
config KSU_SUSFS_OPEN_REDIRECT config KSU_SUSFS_OPEN_REDIRECT
bool "Enable to redirect a path to be opened with another path (experimental)" bool "Enable to redirect a path to be opened with another path (experimental)"
depends on KSU_SUSFS depends on KSU_SUSFS
default y default y
help help
- Allow redirecting a target path to be opened with another user-defined path. - Allow redirecting a target path to be opened with another user-defined path.
- Effective only on processes with uid < 2000. - Effective only on processes with uid < 2000.
- Please be reminded that process with open access to the target and redirected path can be detected. - Please be reminded that process with open access to the target and redirected path can be detected.
config KSU_SUSFS_SUS_MAP config KSU_SUSFS_SUS_SU
bool "Enable to hide some mmapped real file from different proc maps interfaces" bool "Enable SUS-SU in runtime temporarily"
depends on KSU_SUSFS depends on KSU_SUSFS && KPROBES && HAVE_KPROBES && KPROBE_EVENTS
default y default y
help help
- Allow hiding mmapped real file from /proc/<pid>/[maps|smaps|smaps_rollup|map_files|mem|pagemap] - Allow user to enable or disable core ksu kprobes hooks temporarily in runtime. There are 2 working modes for sus_su.
- It does NOT support hiding for anon memory. - Mode 0 (default): Disable sus_su, and enable ksu kprobe hooks for su instead.
- It does NOT hide any inline hooks or plt hooks cause by the injected library itself. - Mode 1 (deprecated):
- It may not be able to evade detections by apps that implement a good injection detection. - Mode 2: Enable sus_su, and disable ksu kprobe hooks for su, which means the kernel inline hooks are enabled,
- Effective only on zygote spawned umounted user app process. the same as the su implementaion of non-gki kernel without kprobe supported.
- Only apps with root access granted by ksu manager are allowed to get root.
endmenu endmenu

View File

@@ -1,47 +1,41 @@
kernelsu-objs := ksu.o kernelsu-objs := ksu.o
kernelsu-objs += allowlist.o kernelsu-objs += allowlist.o
kernelsu-objs += dynamic_manager.o kernelsu-objs += dynamic_manager.o
kernelsu-objs += app_profile.o
kernelsu-objs += apk_sign.o kernelsu-objs += apk_sign.o
kernelsu-objs += sucompat.o kernelsu-objs += sucompat.o
kernelsu-objs += syscall_hook_manager.o kernelsu-objs += core_hook.o
kernelsu-objs += throne_tracker.o
kernelsu-objs += pkg_observer.o
kernelsu-objs += setuid_hook.o
kernelsu-objs += lsm_hooks.o
kernelsu-objs += kernel_compat.o
kernelsu-objs += kernel_umount.o
kernelsu-objs += supercalls.o
kernelsu-objs += feature.o
kernelsu-objs += throne_tracker.o
kernelsu-objs += ksud.o kernelsu-objs += ksud.o
kernelsu-objs += embed_ksud.o kernelsu-objs += embed_ksud.o
kernelsu-objs += seccomp_cache.o kernelsu-objs += kernel_compat.o
kernelsu-objs += file_wrapper.o
kernelsu-objs += throne_comm.o
kernelsu-objs += sulog.o
kernelsu-objs += umount_manager.o
ifeq ($(CONFIG_KSU_MANUAL_SU), y) ifeq ($(CONFIG_KSU_TRACEPOINT_HOOK), y)
ccflags-y += -DCONFIG_KSU_MANUAL_SU kernelsu-objs += ksu_trace.o
kernelsu-objs += manual_su.o
endif endif
kernelsu-objs += selinux/selinux.o kernelsu-objs += selinux/selinux.o
kernelsu-objs += selinux/sepolicy.o kernelsu-objs += selinux/sepolicy.o
kernelsu-objs += selinux/rules.o kernelsu-objs += selinux/rules.o
ifeq ($(CONFIG_KSU_THRONE_TRACKER_LEGACY),y)
kernelsu-objs += throne_tracker_legacy.o
else
kernelsu-objs += throne_tracker.o
endif
kernelsu-objs += user_data_scanner.o
ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h
obj-$(CONFIG_KSU) += kernelsu.o obj-$(CONFIG_KSU) += kernelsu.o
obj-$(CONFIG_KSU_TRACEPOINT_HOOK) += ksu_trace_export.o
obj-$(CONFIG_KPM) += kpm/ obj-$(CONFIG_KPM) += kpm/
REPO_OWNER := SukiSU-Ultra REPO_OWNER := SukiSU-Ultra
REPO_NAME := SukiSU-Ultra REPO_NAME := SukiSU-Ultra
REPO_BRANCH := main REPO_BRANCH := main
KSU_VERSION_API := 4.0.0 KSU_VERSION_API := 3.1.9
GIT_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin git GIT_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin git
CURL_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin curl CURL_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin curl
@@ -49,36 +43,30 @@ CURL_BIN := /usr/bin/env PATH="$$PATH":/usr/bin:/usr/local/bin curl
KSU_GITHUB_VERSION := $(shell $(CURL_BIN) -s "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/') KSU_GITHUB_VERSION := $(shell $(CURL_BIN) -s "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
KSU_GITHUB_VERSION_COMMIT := $(shell $(CURL_BIN) -sI "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/commits?sha=$(REPO_BRANCH)&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p') KSU_GITHUB_VERSION_COMMIT := $(shell $(CURL_BIN) -sI "https://api.github.com/repos/$(REPO_OWNER)/$(REPO_NAME)/commits?sha=$(REPO_BRANCH)&per_page=1" | grep -i "link:" | sed -n 's/.*page=\([0-9]*\)>; rel="last".*/\1/p')
ifeq ($(findstring $(srctree),$(src)),$(srctree)) LOCAL_GIT_EXISTS := $(shell test -e $(srctree)/$(src)/../.git && echo 1 || echo 0)
KSU_SRC := $(src)
else
KSU_SRC := $(srctree)/$(src)
endif
LOCAL_GIT_EXISTS := $(shell test -e $(KSU_SRC)/../.git && echo 1 || echo 0)
define get_ksu_version_full define get_ksu_version_full
v$1-$(shell cd $(KSU_SRC); $(GIT_BIN) rev-parse --short=8 HEAD)@$(shell cd $(KSU_SRC); $(GIT_BIN) rev-parse --abbrev-ref HEAD) v$1-$(shell cd $(srctree)/$(src); $(GIT_BIN) rev-parse --short=8 HEAD)@$(shell cd $(srctree)/$(src); $(GIT_BIN) rev-parse --abbrev-ref HEAD)
endef endef
ifeq ($(KSU_GITHUB_VERSION_COMMIT),) ifeq ($(KSU_GITHUB_VERSION_COMMIT),)
ifeq ($(LOCAL_GIT_EXISTS),1) ifeq ($(LOCAL_GIT_EXISTS),1)
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow) $(shell cd $(srctree)/$(src); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_LOCAL_VERSION := $(shell cd $(KSU_SRC); $(GIT_BIN) rev-list --count $(REPO_BRANCH)) KSU_LOCAL_VERSION := $(shell cd $(srctree)/$(src); $(GIT_BIN) rev-list --count $(REPO_BRANCH))
KSU_VERSION := $(shell expr 40000 + $(KSU_LOCAL_VERSION) - 2815) KSU_VERSION := $(shell expr 10000 + $(KSU_LOCAL_VERSION) + 700)
$(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION)) $(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION))
else else
KSU_VERSION := 13000 KSU_VERSION := 13000
$(warning -- Could not fetch version online or via local .git! Using fallback version: $(KSU_VERSION)) $(warning -- Could not fetch version online or via local .git! Using fallback version: $(KSU_VERSION))
endif endif
else else
KSU_VERSION := $(shell expr 40000 + $(KSU_GITHUB_VERSION_COMMIT) - 2815) KSU_VERSION := $(shell expr 10000 + $(KSU_GITHUB_VERSION_COMMIT) + 700)
$(info -- $(REPO_NAME) version (GitHub): $(KSU_VERSION)) $(info -- $(REPO_NAME) version (GitHub): $(KSU_VERSION))
endif endif
ifeq ($(KSU_GITHUB_VERSION),) ifeq ($(KSU_GITHUB_VERSION),)
ifeq ($(LOCAL_GIT_EXISTS),1) ifeq ($(LOCAL_GIT_EXISTS),1)
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow) $(shell cd $(srctree)/$(src); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_VERSION_API)) KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_VERSION_API))
$(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION_FULL)) $(info -- $(REPO_NAME) version (local .git): $(KSU_VERSION_FULL))
$(info -- $(REPO_NAME) Formatted version (local .git): $(KSU_VERSION)) $(info -- $(REPO_NAME) Formatted version (local .git): $(KSU_VERSION))
@@ -87,7 +75,7 @@ ifeq ($(KSU_GITHUB_VERSION),)
$(warning -- $(REPO_NAME) version: $(KSU_VERSION_FULL)) $(warning -- $(REPO_NAME) version: $(KSU_VERSION_FULL))
endif endif
else else
$(shell cd $(KSU_SRC); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow) $(shell cd $(srctree)/$(src); [ -f ../.git/shallow ] && $(GIT_BIN) fetch --unshallow)
KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_GITHUB_VERSION)) KSU_VERSION_FULL := $(call get_ksu_version_full,$(KSU_GITHUB_VERSION))
$(info -- $(REPO_NAME) version (Github): $(KSU_VERSION_FULL)) $(info -- $(REPO_NAME) version (Github): $(KSU_VERSION_FULL))
endif endif
@@ -95,18 +83,12 @@ endif
ccflags-y += -DKSU_VERSION=$(KSU_VERSION) ccflags-y += -DKSU_VERSION=$(KSU_VERSION)
ccflags-y += -DKSU_VERSION_FULL=\"$(KSU_VERSION_FULL)\" ccflags-y += -DKSU_VERSION_FULL=\"$(KSU_VERSION_FULL)\"
$(info -- Supported Unofficial Manager: 5ec1cff (GKI) rsuntk (Non-GKI) ShirkNeko udochina (GKI and non-GKI and KPM)) ifeq ($(CONFIG_KSU_KPROBES_HOOK), y)
$(info -- SukiSU: CONFIG_KSU_KPROBES_HOOK)
ifeq ($(CONFIG_KSU_MANUAL_HOOK), y) else ifeq ($(CONFIG_KSU_TRACEPOINT_HOOK), y)
ccflags-y += -DKSU_MANUAL_HOOK $(info -- SukiSU: CONFIG_KSU_TRACEPOINT_HOOK)
$(info -- SukiSU: KSU_MANUAL_HOOK) else ifeq ($(CONFIG_KSU_MANUAL_HOOK), y)
else ifeq ($(CONFIG_KSU_SUSFS), y) $(info -- SukiSU: CONFIG_KSU_MANUAL_HOOK)
ccflags-y += -DCONFIG_KSU_SUSFS
$(info -- SukiSU: SUSFS_INLINE_HOOK)
else
ccflags-y += -DKSU_KPROBES_HOOK
ccflags-y += -DKSU_TP_HOOK
$(info -- SukiSU: KSU_TRACEPOINT_HOOK)
endif endif
KERNEL_VERSION := $(VERSION).$(PATCHLEVEL) KERNEL_VERSION := $(VERSION).$(PATCHLEVEL)
@@ -148,40 +130,30 @@ endif
ifeq ($(shell grep "ssize_t kernel_write" $(srctree)/fs/read_write.c | grep -q "const void" ; echo $$?),0) ifeq ($(shell grep "ssize_t kernel_write" $(srctree)/fs/read_write.c | grep -q "const void" ; echo $$?),0)
ccflags-y += -DKSU_OPTIONAL_KERNEL_WRITE ccflags-y += -DKSU_OPTIONAL_KERNEL_WRITE
endif endif
ifeq ($(shell grep -q "int\s\+path_umount" $(srctree)/fs/namespace.c; echo $$?),0)
ccflags-y += -DKSU_HAS_PATH_UMOUNT
endif
ifeq ($(shell grep -q "inode_security_struct\s\+\*selinux_inode" $(srctree)/security/selinux/include/objsec.h; echo $$?),0) ifeq ($(shell grep -q "inode_security_struct\s\+\*selinux_inode" $(srctree)/security/selinux/include/objsec.h; echo $$?),0)
ccflags-y += -DKSU_OPTIONAL_SELINUX_INODE ccflags-y += -DKSU_OPTIONAL_SELINUX_INODE
endif endif
ifeq ($(shell grep -q "task_security_struct\s\+\*selinux_cred" $(srctree)/security/selinux/include/objsec.h; echo $$?),0) ifeq ($(shell grep -q "int\s\+path_umount" $(srctree)/fs/namespace.c; echo $$?),0)
ccflags-y += -DKSU_OPTIONAL_SELINUX_CRED ccflags-y += -DKSU_HAS_PATH_UMOUNT
ifneq ($(shell grep -Eq "^int path_umount" $(srctree)/fs/internal.h; echo $$?),0)
$(shell sed -i '/^extern void __init mnt_init/a int path_umount(struct path *path, int flags);' $(srctree)/fs/internal.h;)
$(info -- KernelSU: SusFS: Adding 'int path_umount(struct path *path, int flags);' to $(srctree)/fs/internal.h)
endif endif
# seccomp_types.h were added on 6.7
ifeq ($(shell grep -q "atomic_t\s\+filter_count" $(srctree)/include/linux/seccomp.h $(srctree)/include/linux/seccomp_types.h; echo $$?),0)
ccflags-y += -DKSU_OPTIONAL_SECCOMP_FILTER_CNT
endif endif
# some old kernel backport this, let's check if put_seccomp_filter still exist # Checks Samsung UH drivers
ifneq ($(shell grep -wq "put_seccomp_filter" $(srctree)/kernel/seccomp.c $(srctree)/include/linux/seccomp.h; echo $$?),0)
ccflags-y += -DKSU_OPTIONAL_SECCOMP_FILTER_RELEASE
endif
ifeq ($(shell grep -q "anon_inode_getfd_secure" $(srctree)/fs/anon_inodes.c; echo $$?),0)
ccflags-y += -DKSU_HAS_GETFD_SECURE
endif
ifeq ($(shell grep -A1 "^int vfs_getattr" $(srctree)/fs/stat.c | grep -q "query_flags"; echo $$?),0)
ccflags-y += -DKSU_HAS_NEW_VFS_GETATTR
endif
# Checks Samsung
ifeq ($(shell grep -q "CONFIG_KDP_CRED" $(srctree)/kernel/cred.c; echo $$?),0) ifeq ($(shell grep -q "CONFIG_KDP_CRED" $(srctree)/kernel/cred.c; echo $$?),0)
ccflags-y += -DSAMSUNG_UH_DRIVER_EXIST ccflags-y += -DSAMSUNG_UH_DRIVER_EXIST
endif endif
# Samsung SELinux Porting
ifeq ($(shell grep -q "SEC_SELINUX_PORTING_COMMON" $(srctree)/security/selinux/avc.c; echo $$?),0) ifeq ($(shell grep -q "SEC_SELINUX_PORTING_COMMON" $(srctree)/security/selinux/avc.c; echo $$?),0)
ccflags-y += -DSAMSUNG_SELINUX_PORTING ccflags-y += -DSAMSUNG_SELINUX_PORTING
endif endif
# Function proc_ops check
ifeq ($(shell grep -q "struct proc_ops " $(srctree)/include/linux/proc_fs.h; echo $$?),0) # Check new vfs_getattr()
ccflags-y += -DKSU_COMPAT_HAS_PROC_OPS ifeq ($(shell grep -A1 "^int vfs_getattr" $(srctree)/fs/stat.c | grep -q "query_flags" ; echo $$?),0)
$(info -- KernelSU/compat: new vfs_getattr() found)
ccflags-y += -DKSU_HAS_NEW_VFS_GETATTR
endif endif
# Custom Signs # Custom Signs
@@ -200,9 +172,10 @@ ccflags-y += -DKSU_MANAGER_PACKAGE=\"$(KSU_MANAGER_PACKAGE)\"
$(info -- SukiSU Manager package name: $(KSU_MANAGER_PACKAGE)) $(info -- SukiSU Manager package name: $(KSU_MANAGER_PACKAGE))
endif endif
$(info -- Supported Unofficial Manager: 5ec1cff (GKI) rsuntk (Non-GKI) ShirkNeko udochina (GKI and non-GKI and KPM))
ccflags-y += -Wno-implicit-function-declaration -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat ccflags-y += -Wno-implicit-function-declaration -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat
ccflags-y += -Wno-declaration-after-statement -Wno-unused-function -Wno-unused-variable ccflags-y += -Wno-declaration-after-statement -Wno-unused-function
## For susfs stuff ## ## For susfs stuff ##
ifeq ($(shell test -e $(srctree)/fs/susfs.c; echo $$?),0) ifeq ($(shell test -e $(srctree)/fs/susfs.c; echo $$?),0)
@@ -213,5 +186,4 @@ else
$(info -- You have not integrated susfs in your kernel yet.) $(info -- You have not integrated susfs in your kernel yet.)
$(info -- Read: https://gitlab.com/simonpunk/susfs4ksu) $(info -- Read: https://gitlab.com/simonpunk/susfs4ksu)
endif endif
# Keep a new line here!! Because someone may append config # Keep a new line here!! Because someone may append config

View File

@@ -1,5 +1,3 @@
#include <linux/mutex.h>
#include <linux/task_work.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/fs.h> #include <linux/fs.h>
@@ -10,27 +8,19 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/version.h> #include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#include <linux/sched/task.h>
#else
#include <linux/sched.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#include <linux/compiler_types.h> #include <linux/compiler_types.h>
#endif #endif
#include "ksu.h"
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "selinux/selinux.h" #include "selinux/selinux.h"
#include "kernel_compat.h"
#include "allowlist.h" #include "allowlist.h"
#include "manager.h" #include "manager.h"
#include "kernel_compat.h"
#ifndef CONFIG_KSU_SUSFS
#include "syscall_hook_manager.h"
#endif // #ifndef CONFIG_KSU_SUSFS
#define FILE_MAGIC 0x7f4b5355 // ' KSU', u32 #define FILE_MAGIC 0x7f4b5355 // ' KSU', u32
#define FILE_FORMAT_VERSION 3 // u32 #define FILE_FORMAT_VERSION 4 // u32
#define KSU_APP_PROFILE_PRESERVE_UID 9999 // NOBODY_UID #define KSU_APP_PROFILE_PRESERVE_UID 9999 // NOBODY_UID
#define KSU_DEFAULT_SELINUX_DOMAIN "u:r:su:s0" #define KSU_DEFAULT_SELINUX_DOMAIN "u:r:su:s0"
@@ -41,10 +31,11 @@ static DEFINE_MUTEX(allowlist_mutex);
static struct root_profile default_root_profile; static struct root_profile default_root_profile;
static struct non_root_profile default_non_root_profile; static struct non_root_profile default_non_root_profile;
static int allow_list_arr[PAGE_SIZE / sizeof(int)] __read_mostly static int allow_list_arr[PAGE_SIZE / sizeof(int)] __read_mostly __aligned(PAGE_SIZE);
__aligned(PAGE_SIZE);
static int allow_list_pointer __read_mostly = 0; static int allow_list_pointer __read_mostly = 0;
bool scan_all_users __read_mostly = false;
static void remove_uid_from_arr(uid_t uid) static void remove_uid_from_arr(uid_t uid)
{ {
int *temp_arr; int *temp_arr;
@@ -53,7 +44,7 @@ static void remove_uid_from_arr(uid_t uid)
if (allow_list_pointer == 0) if (allow_list_pointer == 0)
return; return;
temp_arr = kzalloc(sizeof(allow_list_arr), GFP_KERNEL); temp_arr = kmalloc(sizeof(allow_list_arr), GFP_KERNEL);
if (temp_arr == NULL) { if (temp_arr == NULL) {
pr_err("%s: unable to allocate memory\n", __func__); pr_err("%s: unable to allocate memory\n", __func__);
return; return;
@@ -83,7 +74,7 @@ static void init_default_profiles(void)
default_root_profile.groups_count = 1; default_root_profile.groups_count = 1;
default_root_profile.groups[0] = 0; default_root_profile.groups[0] = 0;
memcpy(&default_root_profile.capabilities.effective, &full_cap, memcpy(&default_root_profile.capabilities.effective, &full_cap,
sizeof(default_root_profile.capabilities.effective)); sizeof(default_root_profile.capabilities.effective));
default_root_profile.namespaces = 0; default_root_profile.namespaces = 0;
strcpy(default_root_profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN); strcpy(default_root_profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN);
@@ -103,7 +94,10 @@ static uint8_t allow_list_bitmap[PAGE_SIZE] __read_mostly __aligned(PAGE_SIZE);
#define KERNEL_SU_ALLOWLIST "/data/adb/ksu/.allowlist" #define KERNEL_SU_ALLOWLIST "/data/adb/ksu/.allowlist"
void persistent_allow_list(void); static struct work_struct ksu_save_work;
static struct work_struct ksu_load_work;
static bool persistent_allow_list(void);
void ksu_show_allow_list(void) void ksu_show_allow_list(void)
{ {
@@ -126,8 +120,7 @@ static void ksu_grant_root_to_shell(void)
.current_uid = 2000, .current_uid = 2000,
}; };
strcpy(profile.key, "com.android.shell"); strcpy(profile.key, "com.android.shell");
strcpy(profile.rp_config.profile.selinux_domain, strcpy(profile.rp_config.profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN);
KSU_DEFAULT_SELINUX_DOMAIN);
ksu_set_app_profile(&profile, false); ksu_set_app_profile(&profile, false);
} }
#endif #endif
@@ -153,10 +146,9 @@ exit:
return found; return found;
} }
static inline bool forbid_system_uid(uid_t uid) static inline bool forbid_system_uid(uid_t uid) {
{ #define SHELL_UID 2000
#define SHELL_UID 2000 #define SYSTEM_UID 1000
#define SYSTEM_UID 1000
return uid < SHELL_UID && uid != SYSTEM_UID; return uid < SHELL_UID && uid != SYSTEM_UID;
} }
@@ -199,7 +191,7 @@ bool ksu_set_app_profile(struct app_profile *profile, bool persist)
p = list_entry(pos, struct perm_data, list); p = list_entry(pos, struct perm_data, list);
// both uid and package must match, otherwise it will break multiple package with different user id // both uid and package must match, otherwise it will break multiple package with different user id
if (profile->current_uid == p->profile.current_uid && if (profile->current_uid == p->profile.current_uid &&
!strcmp(profile->key, p->profile.key)) { !strcmp(profile->key, p->profile.key)) {
// found it, just override it all! // found it, just override it all!
memcpy(&p->profile, profile, sizeof(*profile)); memcpy(&p->profile, profile, sizeof(*profile));
result = true; result = true;
@@ -208,7 +200,7 @@ bool ksu_set_app_profile(struct app_profile *profile, bool persist)
} }
// not found, alloc a new node! // not found, alloc a new node!
p = (struct perm_data *)kzalloc(sizeof(struct perm_data), GFP_KERNEL); p = (struct perm_data *)kmalloc(sizeof(struct perm_data), GFP_KERNEL);
if (!p) { if (!p) {
pr_err("ksu_set_app_profile alloc failed\n"); pr_err("ksu_set_app_profile alloc failed\n");
return false; return false;
@@ -230,11 +222,9 @@ bool ksu_set_app_profile(struct app_profile *profile, bool persist)
out: out:
if (profile->current_uid <= BITMAP_UID_MAX) { if (profile->current_uid <= BITMAP_UID_MAX) {
if (profile->allow_su) if (profile->allow_su)
allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] |= allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] |= 1 << (profile->current_uid % BITS_PER_BYTE);
1 << (profile->current_uid % BITS_PER_BYTE);
else else
allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] &= allow_list_bitmap[profile->current_uid / BITS_PER_BYTE] &= ~(1 << (profile->current_uid % BITS_PER_BYTE));
~(1 << (profile->current_uid % BITS_PER_BYTE));
} else { } else {
if (profile->allow_su) { if (profile->allow_su) {
/* /*
@@ -246,8 +236,7 @@ out:
WARN_ON(1); WARN_ON(1);
return false; return false;
} }
allow_list_arr[allow_list_pointer++] = allow_list_arr[allow_list_pointer++] = profile->current_uid;
profile->current_uid;
} else { } else {
remove_uid_from_arr(profile->current_uid); remove_uid_from_arr(profile->current_uid);
} }
@@ -258,20 +247,17 @@ out:
if (unlikely(!strcmp(profile->key, "$"))) { if (unlikely(!strcmp(profile->key, "$"))) {
// set default non root profile // set default non root profile
memcpy(&default_non_root_profile, &profile->nrp_config.profile, memcpy(&default_non_root_profile, &profile->nrp_config.profile,
sizeof(default_non_root_profile)); sizeof(default_non_root_profile));
} }
if (unlikely(!strcmp(profile->key, "#"))) { if (unlikely(!strcmp(profile->key, "#"))) {
// set default root profile // set default root profile
memcpy(&default_root_profile, &profile->rp_config.profile, memcpy(&default_root_profile, &profile->rp_config.profile,
sizeof(default_root_profile)); sizeof(default_root_profile));
} }
if (persist) { if (persist)
persistent_allow_list(); persistent_allow_list();
// FIXME: use a new flag
ksu_mark_running_process();
}
return result; return result;
} }
@@ -280,20 +266,23 @@ bool __ksu_is_allow_uid(uid_t uid)
{ {
int i; int i;
if (unlikely(uid == 0)) {
// already root, but only allow our domain.
return is_ksu_domain();
}
if (forbid_system_uid(uid)) { if (forbid_system_uid(uid)) {
// do not bother going through the list if it's system // do not bother going through the list if it's system
return false; return false;
} }
if (likely(ksu_is_manager_uid_valid()) && if (likely(ksu_is_manager_uid_valid()) && unlikely(ksu_get_manager_uid() == uid)) {
unlikely(ksu_get_manager_uid() == uid)) {
// manager is always allowed! // manager is always allowed!
return true; return true;
} }
if (likely(uid <= BITMAP_UID_MAX)) { if (likely(uid <= BITMAP_UID_MAX)) {
return !!(allow_list_bitmap[uid / BITS_PER_BYTE] & return !!(allow_list_bitmap[uid / BITS_PER_BYTE] & (1 << (uid % BITS_PER_BYTE)));
(1 << (uid % BITS_PER_BYTE)));
} else { } else {
for (i = 0; i < allow_list_pointer; i++) { for (i = 0; i < allow_list_pointer; i++) {
if (allow_list_arr[i] == uid) if (allow_list_arr[i] == uid)
@@ -304,20 +293,10 @@ bool __ksu_is_allow_uid(uid_t uid)
return false; return false;
} }
bool __ksu_is_allow_uid_for_current(uid_t uid)
{
if (unlikely(uid == 0)) {
// already root, but only allow our domain.
return is_ksu_domain();
}
return __ksu_is_allow_uid(uid);
}
bool ksu_uid_should_umount(uid_t uid) bool ksu_uid_should_umount(uid_t uid)
{ {
struct app_profile profile = { .current_uid = uid }; struct app_profile profile = { .current_uid = uid };
if (likely(ksu_is_manager_uid_valid()) && if (likely(ksu_is_manager_uid_valid()) && unlikely(ksu_get_manager_uid() == uid)) {
unlikely(ksu_get_manager_uid() == uid)) {
// we should not umount on manager! // we should not umount on manager!
return false; return false;
} }
@@ -374,39 +353,61 @@ bool ksu_get_allow_list(int *array, int *length, bool allow)
return true; return true;
} }
static void do_persistent_allow_list(struct callback_head *_cb) bool ksu_set_scan_all_users(bool enabled)
{
mutex_lock(&allowlist_mutex);
scan_all_users = enabled;
mutex_unlock(&allowlist_mutex);
pr_info("scan_all_users set to: %d\n", enabled);
return persistent_allow_list();
}
bool ksu_get_scan_all_users(void)
{
return scan_all_users;
}
static void do_save_allow_list(struct work_struct *work)
{ {
u32 magic = FILE_MAGIC; u32 magic = FILE_MAGIC;
u32 version = FILE_FORMAT_VERSION; u32 version = FILE_FORMAT_VERSION;
u32 scan_setting = scan_all_users ? 1 : 0;
struct perm_data *p = NULL; struct perm_data *p = NULL;
struct list_head *pos = NULL; struct list_head *pos = NULL;
loff_t off = 0; loff_t off = 0;
mutex_lock(&allowlist_mutex); struct file *fp =
struct file *fp = ksu_filp_open_compat( ksu_filp_open_compat(KERNEL_SU_ALLOWLIST, O_WRONLY | O_CREAT | O_TRUNC, 0644);
KERNEL_SU_ALLOWLIST, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) { if (IS_ERR(fp)) {
pr_err("save_allow_list create file failed: %ld\n", pr_err("save_allow_list create file failed: %ld\n", PTR_ERR(fp));
PTR_ERR(fp)); return;
goto unlock;
} }
// store magic and version // store magic and version
if (ksu_kernel_write_compat(fp, &magic, sizeof(magic), &off) != if (ksu_kernel_write_compat(fp, &magic, sizeof(magic), &off) !=
sizeof(magic)) { sizeof(magic)) {
pr_err("save_allow_list write magic failed.\n"); pr_err("save_allow_list write magic failed.\n");
goto close_file; goto exit;
} }
if (ksu_kernel_write_compat(fp, &version, sizeof(version), &off) != if (ksu_kernel_write_compat(fp, &version, sizeof(version), &off) !=
sizeof(version)) { sizeof(version)) {
pr_err("save_allow_list write version failed.\n"); pr_err("save_allow_list write version failed.\n");
goto close_file; goto exit;
}
// Save scan_all_users settings
if (ksu_kernel_write_compat(fp, &scan_setting, sizeof(scan_setting), &off) !=
sizeof(scan_setting)) {
pr_err("save_allow_list write scan_setting failed.\n");
goto exit;
} }
list_for_each (pos, &allow_list) { list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list); p = list_entry(pos, struct perm_data, list);
pr_info("save allow list, name: %s uid :%d, allow: %d\n", pr_info("save allow list, name: %s uid: %d, allow: %d\n",
p->profile.key, p->profile.current_uid, p->profile.key, p->profile.current_uid,
p->profile.allow_su); p->profile.allow_su);
@@ -414,43 +415,18 @@ static void do_persistent_allow_list(struct callback_head *_cb)
&off); &off);
} }
close_file: exit:
filp_close(fp, 0); filp_close(fp, 0);
unlock:
mutex_unlock(&allowlist_mutex);
kfree(_cb);
} }
void persistent_allow_list(void) static void do_load_allow_list(struct work_struct *work)
{
struct task_struct *tsk;
tsk = get_pid_task(find_vpid(1), PIDTYPE_PID);
if (!tsk) {
pr_err("save_allow_list find init task err\n");
return;
}
struct callback_head *cb =
kzalloc(sizeof(struct callback_head), GFP_KERNEL);
if (!cb) {
pr_err("save_allow_list alloc cb err\b");
goto put_task;
}
cb->func = do_persistent_allow_list;
task_work_add(tsk, cb, TWA_RESUME);
put_task:
put_task_struct(tsk);
}
void ksu_load_allow_list(void)
{ {
loff_t off = 0; loff_t off = 0;
ssize_t ret = 0; ssize_t ret = 0;
struct file *fp = NULL; struct file *fp = NULL;
u32 magic; u32 magic;
u32 version; u32 version;
u32 scan_setting = 0;
#ifdef CONFIG_KSU_DEBUG #ifdef CONFIG_KSU_DEBUG
// always allow adb shell by default // always allow adb shell by default
@@ -466,25 +442,43 @@ void ksu_load_allow_list(void)
// verify magic // verify magic
if (ksu_kernel_read_compat(fp, &magic, sizeof(magic), &off) != if (ksu_kernel_read_compat(fp, &magic, sizeof(magic), &off) !=
sizeof(magic) || sizeof(magic) ||
magic != FILE_MAGIC) { magic != FILE_MAGIC) {
pr_err("allowlist file invalid: %d!\n", magic); pr_err("allowlist file invalid: %d!\n", magic);
goto exit; goto exit;
} }
if (ksu_kernel_read_compat(fp, &version, sizeof(version), &off) != if (ksu_kernel_read_compat(fp, &version, sizeof(version), &off) !=
sizeof(version)) { sizeof(version)) {
pr_err("allowlist read version: %d failed\n", version); pr_err("allowlist read version: %d failed\n", version);
goto exit; goto exit;
} }
pr_info("allowlist version: %d\n", version); pr_info("allowlist version: %d\n", version);
if (version >= 4) {
if (ksu_kernel_read_compat(fp, &scan_setting, sizeof(scan_setting), &off) !=
sizeof(scan_setting)) {
pr_warn("allowlist read scan_setting failed, using default\n");
scan_setting = 0;
}
mutex_lock(&allowlist_mutex);
scan_all_users = (scan_setting != 0);
mutex_unlock(&allowlist_mutex);
pr_info("loaded scan_all_users: %d\n", scan_all_users);
} else {
mutex_lock(&allowlist_mutex);
scan_all_users = false;
mutex_unlock(&allowlist_mutex);
}
while (true) { while (true) {
struct app_profile profile; struct app_profile profile;
ret = ksu_kernel_read_compat(fp, &profile, sizeof(profile), ret = ksu_kernel_read_compat(fp, &profile, sizeof(profile),
&off); &off);
if (ret <= 0) { if (ret <= 0) {
pr_info("load_allow_list read err: %zd\n", ret); pr_info("load_allow_list read err: %zd\n", ret);
@@ -501,17 +495,11 @@ exit:
filp_close(fp, 0); filp_close(fp, 0);
} }
void ksu_prune_allowlist(bool (*is_uid_valid)(uid_t, char *, void *), void ksu_prune_allowlist(bool (*is_uid_valid)(uid_t, char *, void *), void *data)
void *data)
{ {
struct perm_data *np = NULL; struct perm_data *np = NULL;
struct perm_data *n = NULL; struct perm_data *n = NULL;
if (!ksu_boot_completed) {
pr_info("boot not completed, skip prune\n");
return;
}
bool modified = false; bool modified = false;
// TODO: use RCU! // TODO: use RCU!
mutex_lock(&allowlist_mutex); mutex_lock(&allowlist_mutex);
@@ -525,8 +513,7 @@ void ksu_prune_allowlist(bool (*is_uid_valid)(uid_t, char *, void *),
pr_info("prune uid: %d, package: %s\n", uid, package); pr_info("prune uid: %d, package: %s\n", uid, package);
list_del(&np->list); list_del(&np->list);
if (likely(uid <= BITMAP_UID_MAX)) { if (likely(uid <= BITMAP_UID_MAX)) {
allow_list_bitmap[uid / BITS_PER_BYTE] &= allow_list_bitmap[uid / BITS_PER_BYTE] &= ~(1 << (uid % BITS_PER_BYTE));
~(1 << (uid % BITS_PER_BYTE));
} }
remove_uid_from_arr(uid); remove_uid_from_arr(uid);
smp_mb(); smp_mb();
@@ -540,6 +527,17 @@ void ksu_prune_allowlist(bool (*is_uid_valid)(uid_t, char *, void *),
} }
} }
// make sure allow list works cross boot
static bool persistent_allow_list(void)
{
return ksu_queue_work(&ksu_save_work);
}
bool ksu_load_allow_list(void)
{
return ksu_queue_work(&ksu_load_work);
}
void ksu_allowlist_init(void) void ksu_allowlist_init(void)
{ {
int i; int i;
@@ -552,6 +550,9 @@ void ksu_allowlist_init(void)
INIT_LIST_HEAD(&allow_list); INIT_LIST_HEAD(&allow_list);
INIT_WORK(&ksu_save_work, do_save_allow_list);
INIT_WORK(&ksu_load_work, do_load_allow_list);
init_default_profiles(); init_default_profiles();
} }
@@ -560,6 +561,8 @@ void ksu_allowlist_exit(void)
struct perm_data *np = NULL; struct perm_data *np = NULL;
struct perm_data *n = NULL; struct perm_data *n = NULL;
do_save_allow_list(NULL);
// free allowlist // free allowlist
mutex_lock(&allowlist_mutex); mutex_lock(&allowlist_mutex);
list_for_each_entry_safe (np, n, &allow_list, list) { list_for_each_entry_safe (np, n, &allow_list, list) {
@@ -568,81 +571,3 @@ void ksu_allowlist_exit(void)
} }
mutex_unlock(&allowlist_mutex); mutex_unlock(&allowlist_mutex);
} }
#ifdef CONFIG_KSU_MANUAL_SU
bool ksu_temp_grant_root_once(uid_t uid)
{
struct app_profile profile = {
.version = KSU_APP_PROFILE_VER,
.allow_su = true,
.current_uid = uid,
};
const char *default_key = "com.temp.once";
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool found = false;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
if (p->profile.current_uid == uid) {
strcpy(profile.key, p->profile.key);
found = true;
break;
}
}
if (!found) {
strcpy(profile.key, default_key);
}
profile.rp_config.profile.uid = default_root_profile.uid;
profile.rp_config.profile.gid = default_root_profile.gid;
profile.rp_config.profile.groups_count = default_root_profile.groups_count;
memcpy(profile.rp_config.profile.groups, default_root_profile.groups, sizeof(default_root_profile.groups));
memcpy(&profile.rp_config.profile.capabilities, &default_root_profile.capabilities, sizeof(default_root_profile.capabilities));
profile.rp_config.profile.namespaces = default_root_profile.namespaces;
strcpy(profile.rp_config.profile.selinux_domain, default_root_profile.selinux_domain);
bool ok = ksu_set_app_profile(&profile, false);
if (ok)
pr_info("pending_root: UID=%d granted and persisted\n", uid);
return ok;
}
void ksu_temp_revoke_root_once(uid_t uid)
{
struct app_profile profile = {
.version = KSU_APP_PROFILE_VER,
.allow_su = false,
.current_uid = uid,
};
const char *default_key = "com.temp.once";
struct perm_data *p = NULL;
struct list_head *pos = NULL;
bool found = false;
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
if (p->profile.current_uid == uid) {
strcpy(profile.key, p->profile.key);
found = true;
break;
}
}
if (!found) {
strcpy(profile.key, default_key);
}
profile.nrp_config.profile.umount_modules = default_non_root_profile.umount_modules;
strcpy(profile.rp_config.profile.selinux_domain, KSU_DEFAULT_SELINUX_DOMAIN);
ksu_set_app_profile(&profile, false);
persistent_allow_list();
pr_info("pending_root: UID=%d removed and persist updated\n", uid);
}
#endif

View File

@@ -2,36 +2,22 @@
#define __KSU_H_ALLOWLIST #define __KSU_H_ALLOWLIST
#include <linux/types.h> #include <linux/types.h>
#include <linux/uidgid.h> #include "ksu.h"
#include "app_profile.h"
#define PER_USER_RANGE 100000
#define FIRST_APPLICATION_UID 10000
#define LAST_APPLICATION_UID 19999
#define FIRST_ISOLATED_UID 99000
#define LAST_ISOLATED_UID 99999
void ksu_allowlist_init(void); void ksu_allowlist_init(void);
void ksu_allowlist_exit(void); void ksu_allowlist_exit(void);
void ksu_load_allow_list(void); bool ksu_load_allow_list(void);
void ksu_show_allow_list(void); void ksu_show_allow_list(void);
// Check if the uid is in allow list
bool __ksu_is_allow_uid(uid_t uid); bool __ksu_is_allow_uid(uid_t uid);
#define ksu_is_allow_uid(uid) unlikely(__ksu_is_allow_uid(uid)) #define ksu_is_allow_uid(uid) unlikely(__ksu_is_allow_uid(uid))
// Check if the uid is in allow list, or current is ksu domain root
bool __ksu_is_allow_uid_for_current(uid_t uid);
#define ksu_is_allow_uid_for_current(uid) \
unlikely(__ksu_is_allow_uid_for_current(uid))
bool ksu_get_allow_list(int *array, int *length, bool allow); bool ksu_get_allow_list(int *array, int *length, bool allow);
void ksu_prune_allowlist(bool (*is_uid_exist)(uid_t, char *, void *), void ksu_prune_allowlist(bool (*is_uid_exist)(uid_t, char *, void *), void *data);
void *data);
bool ksu_get_app_profile(struct app_profile *); bool ksu_get_app_profile(struct app_profile *);
bool ksu_set_app_profile(struct app_profile *, bool persist); bool ksu_set_app_profile(struct app_profile *, bool persist);
@@ -39,21 +25,8 @@ bool ksu_set_app_profile(struct app_profile *, bool persist);
bool ksu_uid_should_umount(uid_t uid); bool ksu_uid_should_umount(uid_t uid);
struct root_profile *ksu_get_root_profile(uid_t uid); struct root_profile *ksu_get_root_profile(uid_t uid);
static inline bool is_appuid(uid_t uid) bool ksu_set_scan_all_users(bool enabled);
{ bool ksu_get_scan_all_users(void);
uid_t appid = uid % PER_USER_RANGE;
return appid >= FIRST_APPLICATION_UID && appid <= LAST_APPLICATION_UID;
}
static inline bool is_isolated_process(uid_t uid)
{
uid_t appid = uid % PER_USER_RANGE;
return appid >= FIRST_ISOLATED_UID && appid <= LAST_ISOLATED_UID;
}
#ifdef CONFIG_KSU_MANUAL_SU
bool ksu_temp_grant_root_once(uid_t uid);
void ksu_temp_revoke_root_once(uid_t uid);
#endif
extern bool scan_all_users __read_mostly;
#endif #endif

View File

@@ -15,23 +15,26 @@
#endif #endif
#include "apk_sign.h" #include "apk_sign.h"
#include "dynamic_manager.h"
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "manager_sign.h"
#include "kernel_compat.h" #include "kernel_compat.h"
#include "manager_sign.h"
#include "dynamic_manager.h"
struct sdesc { struct sdesc {
struct shash_desc shash; struct shash_desc shash;
char ctx[]; char ctx[];
}; };
static apk_sign_key_t apk_sign_keys[] = { static struct apk_sign_key {
unsigned size;
const char *sha256;
} apk_sign_keys[] = {
{EXPECTED_SIZE_SHIRKNEKO, EXPECTED_HASH_SHIRKNEKO}, // SukiSU {EXPECTED_SIZE_SHIRKNEKO, EXPECTED_HASH_SHIRKNEKO}, // SukiSU
#ifdef CONFIG_KSU_MULTI_MANAGER_SUPPORT #ifdef CONFIG_KSU_MULTI_MANAGER_SUPPORT
{EXPECTED_SIZE_WEISHU, EXPECTED_HASH_WEISHU}, // Official {EXPECTED_SIZE_WEISHU, EXPECTED_HASH_WEISHU}, // Official
{EXPECTED_SIZE_5EC1CFF, EXPECTED_HASH_5EC1CFF}, // 5ec1cff/KernelSU {EXPECTED_SIZE_5EC1CFF, EXPECTED_HASH_5EC1CFF}, // 5ec1cff/KernelSU
{EXPECTED_SIZE_RSUNTK, EXPECTED_HASH_RSUNTK}, // rsuntk/KernelSU {EXPECTED_SIZE_RSUNTK, EXPECTED_HASH_RSUNTK}, // rsuntk/KernelSU
{EXPECTED_SIZE_NEKO, EXPECTED_HASH_NEKO}, // Neko/KernelSU {EXPECTED_SIZE_NEKO, EXPECTED_HASH_NEKO}, // Neko/KernelSU
#ifdef EXPECTED_SIZE #ifdef EXPECTED_SIZE
{EXPECTED_SIZE, EXPECTED_HASH}, // Custom {EXPECTED_SIZE, EXPECTED_HASH}, // Custom
#endif #endif
@@ -44,7 +47,7 @@ static struct sdesc *init_sdesc(struct crypto_shash *alg)
int size; int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
sdesc = kzalloc(size, GFP_KERNEL); sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) if (!sdesc)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg; sdesc->shash.tfm = alg;
@@ -52,7 +55,7 @@ static struct sdesc *init_sdesc(struct crypto_shash *alg)
} }
static int calc_hash(struct crypto_shash *alg, const unsigned char *data, static int calc_hash(struct crypto_shash *alg, const unsigned char *data,
unsigned int datalen, unsigned char *digest) unsigned int datalen, unsigned char *digest)
{ {
struct sdesc *sdesc; struct sdesc *sdesc;
int ret; int ret;
@@ -69,7 +72,7 @@ static int calc_hash(struct crypto_shash *alg, const unsigned char *data,
} }
static int ksu_sha256(const unsigned char *data, unsigned int datalen, static int ksu_sha256(const unsigned char *data, unsigned int datalen,
unsigned char *digest) unsigned char *digest)
{ {
struct crypto_shash *alg; struct crypto_shash *alg;
char *hash_alg_name = "sha256"; char *hash_alg_name = "sha256";
@@ -85,19 +88,16 @@ static int ksu_sha256(const unsigned char *data, unsigned int datalen,
return ret; return ret;
} }
static struct dynamic_sign_key dynamic_sign = DYNAMIC_SIGN_DEFAULT_CONFIG;
static bool check_dynamic_sign(struct file *fp, u32 size4, loff_t *pos, int *matched_index) static bool check_dynamic_sign(struct file *fp, u32 size4, loff_t *pos, int *matched_index)
{ {
struct dynamic_sign_key current_dynamic_key = dynamic_sign; unsigned int dynamic_size = 0;
const char *dynamic_hash = NULL;
if (ksu_get_dynamic_manager_config(&current_dynamic_key.size, &current_dynamic_key.hash)) { if (!ksu_get_dynamic_manager_config(&dynamic_size, &dynamic_hash)) {
pr_debug("Using dynamic manager config: size=0x%x, hash=%.16s...\n", return false;
current_dynamic_key.size, current_dynamic_key.hash);
} }
if (size4 != current_dynamic_key.size) { if (size4 != dynamic_size) {
return false; return false;
} }
@@ -120,9 +120,9 @@ static bool check_dynamic_sign(struct file *fp, u32 size4, loff_t *pos, int *mat
	hash_str[SHA256_DIGEST_SIZE * 2] = '\0';
	bin2hex(hash_str, digest, SHA256_DIGEST_SIZE);
-	pr_info("sha256: %s, expected: %s, index: dynamic\n", hash_str, current_dynamic_key.hash);
-	if (strcmp(current_dynamic_key.hash, hash_str) == 0) {
+	pr_info(DM_LOG_PREFIX "dynamic sign verified sha256: %s, expected: %s, (index: %d)\n", hash_str, dynamic_hash, DYNAMIC_SIGN_INDEX);
+	if (strcmp(dynamic_hash, hash_str) == 0) {
		if (matched_index) {
			*matched_index = DYNAMIC_SIGN_INDEX;
		}
@@ -135,7 +135,7 @@ static bool check_dynamic_sign(struct file *fp, u32 size4, loff_t *pos, int *mat
static bool check_block(struct file *fp, u32 *size4, loff_t *pos, u32 *offset, int *matched_index)
{
	int i;
-	apk_sign_key_t sign_key;
+	struct apk_sign_key sign_key;
	bool signature_valid = false;
	ksu_kernel_read_compat(fp, size4, 0x4, pos); // signer-sequence length
@@ -222,8 +222,8 @@ static bool has_v1_signature_file(struct file *fp)
	loff_t pos = 0;
	while (ksu_kernel_read_compat(fp, &header,
				      sizeof(struct zip_entry_header), &pos) ==
	       sizeof(struct zip_entry_header)) {
		if (header.signature != 0x04034b50) {
			// ZIP magic: 'PK'
			return false;
@@ -232,12 +232,12 @@ static bool has_v1_signature_file(struct file *fp)
		if (header.file_name_length == sizeof(MANIFEST) - 1) {
			char fileName[sizeof(MANIFEST)];
			ksu_kernel_read_compat(fp, fileName,
					       header.file_name_length, &pos);
			fileName[header.file_name_length] = '\0';
			// Check if the entry matches META-INF/MANIFEST.MF
			if (strncmp(MANIFEST, fileName, sizeof(MANIFEST) - 1) ==
			    0) {
				return true;
			}
		} else {
@@ -252,6 +252,39 @@ static bool has_v1_signature_file(struct file *fp)
	return false;
}
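has_v1_signature_file() above walks ZIP local file headers looking for META-INF/MANIFEST.MF. struct zip_entry_header itself is defined elsewhere in this tree; the sketch below is an assumption about its layout, based on the standard ZIP local file header (signature 0x04034b50, name length at byte offset 26), and is not taken from the project's sources.

#include <linux/types.h>

struct zip_local_file_header_example {
	u32 signature;          /* 0x04034b50, little-endian "PK\3\4" */
	u16 version_needed;
	u16 flags;
	u16 compression;
	u16 mod_time;
	u16 mod_date;
	u32 crc32;
	u32 compressed_size;
	u32 uncompressed_size;
	u16 file_name_length;   /* length of the file name that follows */
	u16 extra_field_length;
} __packed;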
/*
* small helper to check if lock is held
* false - file is stable
* true - file is being deleted/renamed
* possibly optional
*
*/
bool is_lock_held(const char *path)
{
struct path kpath;
// kern_path returns 0 on success
if (kern_path(path, 0, &kpath))
return true;
// just being defensive
if (!kpath.dentry) {
path_put(&kpath);
return true;
}
if (!spin_trylock(&kpath.dentry->d_lock)) {
pr_info("%s: lock held, bail out!\n", __func__);
path_put(&kpath);
return true;
}
// we hold it ourselves here!
spin_unlock(&kpath.dentry->d_lock);
path_put(&kpath);
return false;
}
static __always_inline bool check_v2_signature(char *path, bool check_multi_manager, int *signature_index)
{
	unsigned char buffer[0x11] = { 0 };
@@ -266,6 +299,23 @@ static __always_inline bool check_v2_signature(char *path, bool check_multi_mana
	bool v3_1_signing_exist = false;
	int matched_index = -1;
	int i;
struct path kpath;
if (kern_path(path, 0, &kpath))
return false;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
if (inode_is_locked(kpath.dentry->d_inode))
#else
if (mutex_is_locked(&kpath.dentry->d_inode->i_mutex))
#endif
{
pr_info("%s: inode is locked for %s\n", __func__, path);
path_put(&kpath);
return false;
}
path_put(&kpath);
	struct file *fp = ksu_filp_open_compat(path, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("open %s error.\n", path);
@@ -321,7 +371,7 @@ static __always_inline bool check_v2_signature(char *path, bool check_multi_mana
			uint32_t id;
			uint32_t offset;
			ksu_kernel_read_compat(fp, &size8, 0x8,
					       &pos); // sequence length
			if (size8 == size_of_block) {
				break;
			}
@@ -350,7 +400,7 @@ static __always_inline bool check_v2_signature(char *path, bool check_multi_mana
	if (v2_signing_blocks != 1) {
#ifdef CONFIG_KSU_DEBUG
		pr_err("Unexpected v2 signature count: %d\n",
		       v2_signing_blocks);
#endif
		v2_signing_valid = false;
	}
@@ -381,7 +431,7 @@ clean:
	if (check_multi_manager) {
		// 0: ShirkNeko/SukiSU, DYNAMIC_SIGN_INDEX : Dynamic Sign
		if (matched_index == 0 || matched_index == DYNAMIC_SIGN_INDEX) {
-			pr_info("Multi-manager APK detected (dynamic_manager enabled): signature_index=%d\n", matched_index);
+			pr_info("[ApkSign] multi-manager APK detected (signature_index=%d)\n", matched_index);
			return true;
		}
		return false;
@@ -419,10 +469,42 @@ module_param_cb(ksu_debug_manager_uid, &expected_size_ops,
bool is_manager_apk(char *path)
{
-	return check_v2_signature(path, false, NULL);
+	int tries = 0;
while (tries++ < 10) {
if (!is_lock_held(path))
break;
pr_info("%s: waiting for %s\n", __func__, path);
msleep(100);
}
// let it go, if retry fails, check_v2_signature will fail to open it anyway
if (tries == 10) {
pr_info("%s: timeout for %s\n", __func__, path);
return false;
}
return check_v2_signature(path, false, NULL);
}
bool is_dynamic_manager_apk(char *path, int *signature_index)
{
-	return check_v2_signature(path, true, signature_index);
+	int tries = 0;
while (tries++ < 10) {
if (!is_lock_held(path))
break;
pr_info("%s: waiting for %s\n", __func__, path);
msleep(100);
}
// let it go, if retry fails, check_v2_signature will fail to open it anyway
if (tries == 10) {
pr_info("%s: timeout for %s\n", __func__, path);
return false;
}
return check_v2_signature(path, true, signature_index);
}
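is_manager_apk() and is_dynamic_manager_apk() above repeat the same poll-until-stable loop. A possible refactor, sketched here as a suggestion rather than code from this tree, hoists it into one helper and returns from inside the loop, which also avoids the ambiguous `tries == 10` check when the file only becomes stable on the last attempt.

#include <linux/delay.h>

static bool example_wait_for_stable_apk(const char *path)
{
	int tries;

	for (tries = 0; tries < 10; tries++) {
		if (!is_lock_held(path))
			return true;
		pr_info("%s: waiting for %s\n", __func__, path);
		msleep(100);
	}
	pr_info("%s: timeout for %s\n", __func__, path);
	return false;
}

Both wrappers could then reduce to checking example_wait_for_stable_apk(path) before calling check_v2_signature().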


@@ -1,325 +0,0 @@
#include <linux/version.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/pid.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/sched/signal.h> // signal_struct
#include <linux/sched/task.h>
#endif
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uidgid.h>
#include <linux/syscalls.h>
#include "objsec.h"
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/security.h>
#include "allowlist.h"
#include "app_profile.h"
#include "arch.h"
#include "kernel_compat.h"
#include "klog.h" // IWYU pragma: keep
#include "selinux/selinux.h"
#ifndef CONFIG_KSU_SUSFS
#include "syscall_hook_manager.h"
#endif
#include "sulog.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION (6, 7, 0)
static struct group_info root_groups = { .usage = REFCOUNT_INIT(2), };
#else
static struct group_info root_groups = { .usage = ATOMIC_INIT(2) };
#endif
static void setup_groups(struct root_profile *profile, struct cred *cred)
{
if (profile->groups_count > KSU_MAX_GROUPS) {
pr_warn("Failed to setgroups, too large group: %d!\n",
profile->uid);
return;
}
if (profile->groups_count == 1 && profile->groups[0] == 0) {
// setgroup to root and return early.
if (cred->group_info)
put_group_info(cred->group_info);
cred->group_info = get_group_info(&root_groups);
return;
}
u32 ngroups = profile->groups_count;
struct group_info *group_info = groups_alloc(ngroups);
if (!group_info) {
pr_warn("Failed to setgroups, ENOMEM for: %d\n", profile->uid);
return;
}
int i;
for (i = 0; i < ngroups; i++) {
gid_t gid = profile->groups[i];
kgid_t kgid = make_kgid(current_user_ns(), gid);
if (!gid_valid(kgid)) {
pr_warn("Failed to setgroups, invalid gid: %d\n", gid);
put_group_info(group_info);
return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
group_info->gid[i] = kgid;
#else
GROUP_AT(group_info, i) = kgid;
#endif
}
groups_sort(group_info);
set_groups(cred, group_info);
put_group_info(group_info);
}
void disable_seccomp(struct task_struct *tsk)
{
if (unlikely(!tsk))
return;
assert_spin_locked(&tsk->sighand->siglock);
// disable seccomp
#if defined(CONFIG_GENERIC_ENTRY) && \
LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
clear_syscall_work(SECCOMP);
#else
clear_thread_flag(TIF_SECCOMP);
#endif
#ifdef CONFIG_SECCOMP
tsk->seccomp.mode = 0;
if (tsk->seccomp.filter) {
// 5.9+ have filter_count, but optional.
#ifdef KSU_OPTIONAL_SECCOMP_FILTER_CNT
atomic_set(&tsk->seccomp.filter_count, 0);
#endif
// some old kernel backport seccomp_filter_release..
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) && \
defined(KSU_OPTIONAL_SECCOMP_FILTER_RELEASE)
seccomp_filter_release(tsk);
#else
// never, ever call seccomp_filter_release on 6.10+ (no effect)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) && \
LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0))
seccomp_filter_release(tsk);
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
put_seccomp_filter(tsk);
#endif
tsk->seccomp.filter = NULL;
#endif
#endif
}
#endif
}
void escape_with_root_profile(void)
{
struct cred *cred;
// a bit useless, but we just want less ifdefs
struct task_struct *p = current;
if (current_euid().val == 0) {
pr_warn("Already root, don't escape!\n");
return;
}
cred = prepare_creds();
if (!cred) {
pr_warn("prepare_creds failed!\n");
return;
}
struct root_profile *profile = ksu_get_root_profile(cred->uid.val);
cred->uid.val = profile->uid;
cred->suid.val = profile->uid;
cred->euid.val = profile->uid;
cred->fsuid.val = profile->uid;
cred->gid.val = profile->gid;
cred->fsgid.val = profile->gid;
cred->sgid.val = profile->gid;
cred->egid.val = profile->gid;
cred->securebits = 0;
BUILD_BUG_ON(sizeof(profile->capabilities.effective) !=
sizeof(kernel_cap_t));
// setup capabilities
	// we need CAP_DAC_READ_SEARCH because `/data/adb/ksud` is not accessible for non-root processes
	// we add it here but don't add it to cap_inheritable; it would be dropped automatically after exec!
u64 cap_for_ksud =
profile->capabilities.effective | CAP_DAC_READ_SEARCH;
memcpy(&cred->cap_effective, &cap_for_ksud,
sizeof(cred->cap_effective));
memcpy(&cred->cap_permitted, &profile->capabilities.effective,
sizeof(cred->cap_permitted));
memcpy(&cred->cap_bset, &profile->capabilities.effective,
sizeof(cred->cap_bset));
setup_groups(profile, cred);
commit_creds(cred);
// Refer to kernel/seccomp.c: seccomp_set_mode_strict
// When disabling Seccomp, ensure that current->sighand->siglock is held during the operation.
spin_lock_irq(&p->sighand->siglock);
disable_seccomp(p);
spin_unlock_irq(&p->sighand->siglock);
setup_selinux(profile->selinux_domain);
#if __SULOG_GATE
ksu_sulog_report_su_grant(current_euid().val, NULL, "escape_to_root");
#endif
#ifndef CONFIG_KSU_SUSFS
struct task_struct *t;
for_each_thread (p, t) {
ksu_set_task_tracepoint_flag(t);
}
#endif
}
#ifdef CONFIG_KSU_MANUAL_SU
#include "ksud.h"
#ifndef DEVPTS_SUPER_MAGIC
#define DEVPTS_SUPER_MAGIC 0x1cd1
#endif
static int __manual_su_handle_devpts(struct inode *inode)
{
if (!current->mm) {
return 0;
}
uid_t uid = current_uid().val;
if (uid % 100000 < 10000) {
// not untrusted_app, ignore it
return 0;
}
if (likely(!ksu_is_allow_uid_for_current(uid)))
return 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(KSU_OPTIONAL_SELINUX_INODE)
struct inode_security_struct *sec = selinux_inode(inode);
#else
struct inode_security_struct *sec =
(struct inode_security_struct *)inode->i_security;
#endif
if (ksu_file_sid && sec)
sec->sid = ksu_file_sid;
return 0;
}
void escape_to_root_for_cmd_su(uid_t target_uid, pid_t target_pid)
{
struct cred *newcreds;
struct task_struct *target_task;
unsigned long flags;
pr_info("cmd_su: escape_to_root_for_cmd_su called for UID: %d, PID: %d\n", target_uid, target_pid);
// Find target task by PID
rcu_read_lock();
target_task = pid_task(find_vpid(target_pid), PIDTYPE_PID);
if (!target_task) {
rcu_read_unlock();
pr_err("cmd_su: target task not found for PID: %d\n", target_pid);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "target_not_found");
#endif
return;
}
get_task_struct(target_task);
rcu_read_unlock();
if (task_uid(target_task).val == 0) {
pr_warn("cmd_su: target task is already root, PID: %d\n", target_pid);
put_task_struct(target_task);
return;
}
newcreds = prepare_kernel_cred(target_task);
if (newcreds == NULL) {
pr_err("cmd_su: failed to allocate new cred for PID: %d\n", target_pid);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "cred_alloc_failed");
#endif
put_task_struct(target_task);
return;
}
struct root_profile *profile = ksu_get_root_profile(target_uid);
newcreds->uid.val = profile->uid;
newcreds->suid.val = profile->uid;
newcreds->euid.val = profile->uid;
newcreds->fsuid.val = profile->uid;
newcreds->gid.val = profile->gid;
newcreds->fsgid.val = profile->gid;
newcreds->sgid.val = profile->gid;
newcreds->egid.val = profile->gid;
newcreds->securebits = 0;
u64 cap_for_cmd_su = profile->capabilities.effective | CAP_DAC_READ_SEARCH | CAP_SETUID | CAP_SETGID;
memcpy(&newcreds->cap_effective, &cap_for_cmd_su, sizeof(newcreds->cap_effective));
memcpy(&newcreds->cap_permitted, &profile->capabilities.effective, sizeof(newcreds->cap_permitted));
memcpy(&newcreds->cap_bset, &profile->capabilities.effective, sizeof(newcreds->cap_bset));
setup_groups(profile, newcreds);
task_lock(target_task);
const struct cred *old_creds = get_task_cred(target_task);
rcu_assign_pointer(target_task->real_cred, newcreds);
rcu_assign_pointer(target_task->cred, get_cred(newcreds));
task_unlock(target_task);
if (target_task->sighand) {
spin_lock_irqsave(&target_task->sighand->siglock, flags);
disable_seccomp(target_task);
spin_unlock_irqrestore(&target_task->sighand->siglock, flags);
}
setup_selinux(profile->selinux_domain);
put_cred(old_creds);
wake_up_process(target_task);
if (target_task->signal->tty) {
struct inode *inode = target_task->signal->tty->driver_data;
if (inode && inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
__manual_su_handle_devpts(inode);
}
}
put_task_struct(target_task);
#if __SULOG_GATE
ksu_sulog_report_su_grant(target_uid, "cmd_su", "manual_escalation");
#endif
#ifndef CONFIG_KSU_SUSFS
struct task_struct *p = current;
struct task_struct *t;
for_each_thread (p, t) {
ksu_set_task_tracepoint_flag(t);
}
#endif
pr_info("cmd_su: privilege escalation completed for UID: %d, PID: %d\n", target_uid, target_pid);
}
#endif


@@ -1,67 +0,0 @@
#ifndef __KSU_H_APP_PROFILE
#define __KSU_H_APP_PROFILE
#include <linux/types.h>
// Forward declarations
struct cred;
#define KSU_APP_PROFILE_VER 2
#define KSU_MAX_PACKAGE_NAME 256
// NGROUPS_MAX for Linux is generally 65535, but we only support 32 groups.
#define KSU_MAX_GROUPS 32
#define KSU_SELINUX_DOMAIN 64
struct root_profile {
int32_t uid;
int32_t gid;
int32_t groups_count;
int32_t groups[KSU_MAX_GROUPS];
// kernel_cap_t is u32[2] for capabilities v3
struct {
u64 effective;
u64 permitted;
u64 inheritable;
} capabilities;
char selinux_domain[KSU_SELINUX_DOMAIN];
int32_t namespaces;
};
struct non_root_profile {
bool umount_modules;
};
struct app_profile {
// It may be utilized for backward compatibility, although we have never explicitly made any promises regarding this.
u32 version;
// this is usually the package of the app, but can be other value for special apps
char key[KSU_MAX_PACKAGE_NAME];
int32_t current_uid;
bool allow_su;
union {
struct {
bool use_default;
char template_name[KSU_MAX_PACKAGE_NAME];
struct root_profile profile;
} rp_config;
struct {
bool use_default;
struct non_root_profile profile;
} nrp_config;
};
};
// Escalate current process to root with the appropriate profile
void escape_with_root_profile(void);
void escape_to_root_for_cmd_su(uid_t target_uid, pid_t target_pid);
#endif
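Illustrative only: a root_profile built from the structures above, with assumed values; nothing here is a default shipped by the project.

static struct root_profile example_root_profile = {
	.uid = 0,
	.gid = 0,
	.groups_count = 1,
	.groups = { 0 },
	.capabilities = {
		.effective = ~0ULL,   /* raise every capability bit */
		.permitted = ~0ULL,
		.inheritable = 0,
	},
	.selinux_domain = "u:r:su:s0", /* placeholder domain string */
	.namespaces = 0,
};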


@@ -19,15 +19,21 @@
#define __PT_IP_REG pc
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
-#define REBOOT_SYMBOL "__arm64_sys_reboot"
+#define PRCTL_SYMBOL "__arm64_sys_prctl"
#define SYS_READ_SYMBOL "__arm64_sys_read"
+#define SYS_NEWFSTATAT_SYMBOL "__arm64_sys_newfstatat"
+#define SYS_FSTATAT64_SYMBOL "__arm64_sys_fstatat64"
+#define SYS_FACCESSAT_SYMBOL "__arm64_sys_faccessat"
#define SYS_EXECVE_SYMBOL "__arm64_sys_execve"
-#define SYS_SETNS_SYMBOL __arm64_sys_setns
+#define SYS_EXECVE_COMPAT_SYMBOL "__arm64_compat_sys_execve"
#else
-#define REBOOT_SYMBOL "sys_reboot"
+#define PRCTL_SYMBOL "sys_prctl"
#define SYS_READ_SYMBOL "sys_read"
+#define SYS_NEWFSTATAT_SYMBOL "sys_newfstatat"
+#define SYS_FSTATAT64_SYMBOL "sys_fstatat64"
+#define SYS_FACCESSAT_SYMBOL "sys_faccessat"
#define SYS_EXECVE_SYMBOL "sys_execve"
-#define SYS_SETNS_SYMBOL sys_setns
+#define SYS_EXECVE_COMPAT_SYMBOL "compat_sys_execve"
#endif
#elif defined(__x86_64__)
@@ -45,21 +51,26 @@
#define __PT_RC_REG ax
#define __PT_SP_REG sp
#define __PT_IP_REG ip
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
-#define REBOOT_SYMBOL "__x64_sys_reboot"
+#define PRCTL_SYMBOL "__x64_sys_prctl"
#define SYS_READ_SYMBOL "__x64_sys_read"
+#define SYS_NEWFSTATAT_SYMBOL "__x64_sys_newfstatat"
+#define SYS_FSTATAT64_SYMBOL "__x64_sys_fstatat64"
+#define SYS_FACCESSAT_SYMBOL "__x64_sys_faccessat"
#define SYS_EXECVE_SYMBOL "__x64_sys_execve"
-#define SYS_SETNS_SYMBOL __x64_sys_setns
+#define SYS_EXECVE_COMPAT_SYMBOL "__x64_compat_sys_execve"
#else
-#define REBOOT_SYMBOL "sys_reboot"
+#define PRCTL_SYMBOL "sys_prctl"
#define SYS_READ_SYMBOL "sys_read"
+#define SYS_NEWFSTATAT_SYMBOL "sys_newfstatat"
+#define SYS_FSTATAT64_SYMBOL "sys_fstatat64"
+#define SYS_FACCESSAT_SYMBOL "sys_faccessat"
#define SYS_EXECVE_SYMBOL "sys_execve"
-#define SYS_SETNS_SYMBOL sys_setns
+#define SYS_EXECVE_COMPAT_SYMBOL "compat_sys_execve"
#endif
#else
-#ifdef KSU_KPROBES_HOOK
+#ifdef CONFIG_KSU_KPROBES_HOOK
#error "Unsupported arch"
#endif
#endif
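The macros above only name per-architecture syscall symbols; how they are attached depends on the hook mode (CONFIG_KSU_KPROBES_HOOK versus direct hooks) elsewhere in the tree. As a hedged illustration only, this is how such a symbol name is typically consumed through the kprobes API; it is an assumption, not this project's hook code.

#include <linux/kprobes.h>

static int example_prctl_pre(struct kprobe *kp, struct pt_regs *regs)
{
	/* syscall arguments can be inspected through regs here */
	return 0;
}

static struct kprobe example_prctl_probe = {
	.symbol_name = PRCTL_SYMBOL,
	.pre_handler = example_prctl_pre,
};

static int __init example_hook_init(void)
{
	return register_kprobe(&example_prctl_probe);
}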

kernel/core_hook.c: 1684 lines (file diff suppressed because it is too large)

kernel/core_hook.h: 10 lines

@@ -0,0 +1,10 @@
#ifndef __KSU_H_KSU_CORE
#define __KSU_H_KSU_CORE
#include <linux/init.h>
#include "apk_sign.h"
void __init ksu_core_init(void);
void ksu_core_exit(void);
#endif


@@ -17,16 +17,16 @@
#include "dynamic_manager.h" #include "dynamic_manager.h"
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "manager.h"
#include "kernel_compat.h" #include "kernel_compat.h"
#include "manager.h"
#define MAX_MANAGERS 2 #define MAX_MANAGERS 2
// Dynamic sign configuration // Dynamic sign configuration
static struct dynamic_manager_config dynamic_manager = { static struct dynamic_manager_config dynamic_manager = {
.size = 0x300, .size = 0,
.hash = "0000000000000000000000000000000000000000000000000000000000000000", .hash = "",
.is_set = 0 .is_set = 0
}; };
// Multi-manager state // Multi-manager state
@@ -41,465 +41,484 @@ static struct work_struct clear_dynamic_manager_work;
bool ksu_is_dynamic_manager_enabled(void)
{
	unsigned long flags;
	bool enabled;
	spin_lock_irqsave(&dynamic_manager_lock, flags);
-	enabled = dynamic_manager.is_set;
+	enabled = dynamic_manager.is_set && dynamic_manager.size > 0 && strlen(dynamic_manager.hash) == 64;
	spin_unlock_irqrestore(&dynamic_manager_lock, flags);
	return enabled;
}
void ksu_add_manager(uid_t uid, int signature_index)
{
	unsigned long flags;
	int i;
	if (!ksu_is_dynamic_manager_enabled()) {
-		pr_info("Dynamic sign not enabled, skipping multi-manager add\n");
		return;
	}
	spin_lock_irqsave(&managers_lock, flags);
	// Check if manager already exists and update
	for (i = 0; i < MAX_MANAGERS; i++) {
		if (active_managers[i].is_active && active_managers[i].uid == uid) {
			active_managers[i].signature_index = signature_index;
			spin_unlock_irqrestore(&managers_lock, flags);
-			pr_info("Updated manager uid=%d, signature_index=%d\n", uid, signature_index);
+			pr_info(DM_LOG_PREFIX "updated manager uid=%d, signature_index=%d\n", uid, signature_index);
			return;
		}
	}
	// Find free slot for new manager
	for (i = 0; i < MAX_MANAGERS; i++) {
		if (!active_managers[i].is_active) {
			active_managers[i].uid = uid;
			active_managers[i].signature_index = signature_index;
			active_managers[i].is_active = true;
			spin_unlock_irqrestore(&managers_lock, flags);
-			pr_info("Added manager uid=%d, signature_index=%d\n", uid, signature_index);
+			pr_info(DM_LOG_PREFIX "added manager uid=%d, signature_index=%d\n", uid, signature_index);
			return;
		}
	}
	spin_unlock_irqrestore(&managers_lock, flags);
-	pr_warn("Failed to add manager, no free slots\n");
+	pr_warn(DM_LOG_PREFIX "failed to add manager, no free slots\n");
}
void ksu_remove_manager(uid_t uid)
{
	unsigned long flags;
	int i;
	if (!ksu_is_dynamic_manager_enabled()) {
		return;
	}
	spin_lock_irqsave(&managers_lock, flags);
	for (i = 0; i < MAX_MANAGERS; i++) {
		if (active_managers[i].is_active && active_managers[i].uid == uid) {
			active_managers[i].is_active = false;
-			pr_info("Removed manager uid=%d\n", uid);
+			pr_info(DM_LOG_PREFIX "removed manager uid=%d\n", uid);
			break;
		}
	}
	spin_unlock_irqrestore(&managers_lock, flags);
}
bool ksu_is_any_manager(uid_t uid)
{
	unsigned long flags;
	bool is_manager = false;
	int i;
	if (!ksu_is_dynamic_manager_enabled()) {
		return false;
	}
	spin_lock_irqsave(&managers_lock, flags);
	for (i = 0; i < MAX_MANAGERS; i++) {
		if (active_managers[i].is_active && active_managers[i].uid == uid) {
			is_manager = true;
			break;
		}
	}
	spin_unlock_irqrestore(&managers_lock, flags);
	return is_manager;
}
int ksu_get_manager_signature_index(uid_t uid)
{
	unsigned long flags;
	int signature_index = -1;
	int i;
	// Check traditional manager first
	if (ksu_manager_uid != KSU_INVALID_UID && uid == ksu_manager_uid) {
-		return DYNAMIC_SIGN_INDEX;
+		return 0;
	}
	if (!ksu_is_dynamic_manager_enabled()) {
		return -1;
	}
	spin_lock_irqsave(&managers_lock, flags);
	for (i = 0; i < MAX_MANAGERS; i++) {
		if (active_managers[i].is_active && active_managers[i].uid == uid) {
			signature_index = active_managers[i].signature_index;
			break;
		}
	}
	spin_unlock_irqrestore(&managers_lock, flags);
	return signature_index;
}
static void ksu_invalidate_dynamic_managers(void)
{
unsigned long flags;
int i;
bool had_active = false;
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active) {
pr_info(DM_LOG_PREFIX "invalidating dynamic manager uid=%d\n", active_managers[i].uid);
active_managers[i].is_active = false;
had_active = true;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
if (had_active) {
pr_info(DM_LOG_PREFIX "all dynamic managers invalidated\n");
}
} }
static void clear_dynamic_manager(void)
{
-	unsigned long flags;
+	ksu_invalidate_dynamic_managers();
int i;
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active) {
pr_info("Clearing dynamic manager uid=%d (signature_index=%d) for rescan\n",
active_managers[i].uid, active_managers[i].signature_index);
active_managers[i].is_active = false;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
int ksu_get_active_managers(struct manager_list_info *info)
{
unsigned long flags;
int i, count = 0;
if (!info) {
return -EINVAL;
}
// Add traditional manager first
if (ksu_manager_uid != KSU_INVALID_UID && count < 2) {
info->managers[count].uid = ksu_manager_uid;
info->managers[count].signature_index = 0;
count++;
}
// Add dynamic managers
if (ksu_is_dynamic_manager_enabled()) {
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS && count < 2; i++) {
if (active_managers[i].is_active) {
info->managers[count].uid = active_managers[i].uid;
info->managers[count].signature_index = active_managers[i].signature_index;
count++;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
info->count = count;
return 0;
}
static void do_save_dynamic_manager(struct work_struct *work)
{
	u32 magic = DYNAMIC_MANAGER_FILE_MAGIC;
	u32 version = DYNAMIC_MANAGER_FILE_VERSION;
	struct dynamic_manager_config config_to_save;
	loff_t off = 0;
	unsigned long flags;
	struct file *fp;
	spin_lock_irqsave(&dynamic_manager_lock, flags);
	config_to_save = dynamic_manager;
	spin_unlock_irqrestore(&dynamic_manager_lock, flags);
	if (!config_to_save.is_set) {
-		pr_info("Dynamic sign config not set, skipping save\n");
		return;
	}
	fp = ksu_filp_open_compat(KERNEL_SU_DYNAMIC_MANAGER, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (IS_ERR(fp)) {
-		pr_err("save_dynamic_manager create file failed: %ld\n", PTR_ERR(fp));
+		pr_err(DM_LOG_PREFIX "save config failed: %ld\n", PTR_ERR(fp));
		return;
	}
-	if (ksu_kernel_write_compat(fp, &magic, sizeof(magic), &off) != sizeof(magic)) {
-		pr_err("save_dynamic_manager write magic failed.\n");
-		goto exit;
-	}
-	if (ksu_kernel_write_compat(fp, &version, sizeof(version), &off) != sizeof(version)) {
-		pr_err("save_dynamic_manager write version failed.\n");
-		goto exit;
-	}
-	if (ksu_kernel_write_compat(fp, &config_to_save, sizeof(config_to_save), &off) != sizeof(config_to_save)) {
-		pr_err("save_dynamic_manager write config failed.\n");
-		goto exit;
-	}
-	pr_info("Dynamic sign config saved successfully\n");
-exit:
-	filp_close(fp, 0);
+	if (ksu_kernel_write_compat(fp, &magic, sizeof(magic), &off) != sizeof(magic) ||
+	    ksu_kernel_write_compat(fp, &version, sizeof(version), &off) != sizeof(version) ||
+	    ksu_kernel_write_compat(fp, &config_to_save, sizeof(config_to_save), &off) != sizeof(config_to_save)) {
+		pr_err(DM_LOG_PREFIX "save config write failed\n");
+	} else {
+		pr_info(DM_LOG_PREFIX "config saved successfully\n");
+	}
+	filp_close(fp, 0);
}
static void do_load_dynamic_manager(struct work_struct *work)
{
	loff_t off = 0;
	ssize_t ret = 0;
	struct file *fp = NULL;
	u32 magic;
	u32 version;
	struct dynamic_manager_config loaded_config;
	unsigned long flags;
	int i;
	fp = ksu_filp_open_compat(KERNEL_SU_DYNAMIC_MANAGER, O_RDONLY, 0);
	if (IS_ERR(fp)) {
-		if (PTR_ERR(fp) == -ENOENT) {
-			pr_info("No saved dynamic manager config found\n");
-		} else {
-			pr_err("load_dynamic_manager open file failed: %ld\n", PTR_ERR(fp));
-		}
+		if (PTR_ERR(fp) != -ENOENT) {
+			pr_err(DM_LOG_PREFIX "load config failed: %ld\n", PTR_ERR(fp));
+		}
		return;
	}
	if (ksu_kernel_read_compat(fp, &magic, sizeof(magic), &off) != sizeof(magic) ||
	    magic != DYNAMIC_MANAGER_FILE_MAGIC) {
-		pr_err("dynamic manager file invalid magic: %x!\n", magic);
+		pr_err(DM_LOG_PREFIX "invalid magic: %x\n", magic);
		goto exit;
	}
	if (ksu_kernel_read_compat(fp, &version, sizeof(version), &off) != sizeof(version)) {
-		pr_err("dynamic manager read version failed\n");
+		pr_err(DM_LOG_PREFIX "read version failed\n");
		goto exit;
	}
-	pr_info("dynamic manager file version: %d\n", version);
-	ret = ksu_kernel_read_compat(fp, &loaded_config, sizeof(loaded_config), &off);
-	if (ret <= 0) {
-		pr_info("load_dynamic_manager read err: %zd\n", ret);
-		goto exit;
-	}
-	if (ret != sizeof(loaded_config)) {
-		pr_err("load_dynamic_manager read incomplete config: %zd/%zu\n", ret, sizeof(loaded_config));
-		goto exit;
-	}
-	if (loaded_config.size < 0x100 || loaded_config.size > 0x1000) {
-		pr_err("Invalid saved config size: 0x%x\n", loaded_config.size);
-		goto exit;
-	}
-	if (strlen(loaded_config.hash) != 64) {
-		pr_err("Invalid saved config hash length: %zu\n", strlen(loaded_config.hash));
-		goto exit;
-	}
-	// Validate hash format
-	for (i = 0; i < 64; i++) {
-		char c = loaded_config.hash[i];
-		if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))) {
-			pr_err("Invalid saved config hash character at position %d: %c\n", i, c);
-			goto exit;
-		}
-	}
-	spin_lock_irqsave(&dynamic_manager_lock, flags);
-	dynamic_manager = loaded_config;
-	spin_unlock_irqrestore(&dynamic_manager_lock, flags);
-	pr_info("Dynamic sign config loaded: size=0x%x, hash=%.16s...\n",
-		loaded_config.size, loaded_config.hash);
+	ret = ksu_kernel_read_compat(fp, &loaded_config, sizeof(loaded_config), &off);
+	if (ret != sizeof(loaded_config)) {
+		pr_err(DM_LOG_PREFIX "read config failed: %zd\n", ret);
+		goto exit;
+	}
+	if (loaded_config.size < 0x100 || loaded_config.size > 0x1000) {
+		pr_err(DM_LOG_PREFIX "invalid size: 0x%x\n", loaded_config.size);
+		goto exit;
+	}
+	if (strlen(loaded_config.hash) != 64) {
+		pr_err(DM_LOG_PREFIX "invalid hash length: %zu\n", strlen(loaded_config.hash));
+		goto exit;
+	}
+	// Validate hash format
+	for (i = 0; i < 64; i++) {
+		char c = loaded_config.hash[i];
+		if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))) {
+			pr_err(DM_LOG_PREFIX "invalid hash character at position %d: %c\n", i, c);
+			goto exit;
+		}
+	}
+	spin_lock_irqsave(&dynamic_manager_lock, flags);
+	dynamic_manager = loaded_config;
+	spin_unlock_irqrestore(&dynamic_manager_lock, flags);
+	pr_info(DM_LOG_PREFIX "config loaded: size=0x%x, hash=%.16s...\n",
+		loaded_config.size, loaded_config.hash);
exit:
	filp_close(fp, 0);
}
static bool persistent_dynamic_manager(void)
{
	return ksu_queue_work(&save_dynamic_manager_work);
}
static void do_clear_dynamic_manager(struct work_struct *work)
{
	loff_t off = 0;
	struct file *fp;
	char zero_buffer[512];
	memset(zero_buffer, 0, sizeof(zero_buffer));
	fp = ksu_filp_open_compat(KERNEL_SU_DYNAMIC_MANAGER, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (IS_ERR(fp)) {
-		pr_err("clear_dynamic_manager create file failed: %ld\n", PTR_ERR(fp));
+		pr_err(DM_LOG_PREFIX "clear config file failed: %ld\n", PTR_ERR(fp));
		return;
	}
	// Write null bytes to overwrite the file content
	if (ksu_kernel_write_compat(fp, zero_buffer, sizeof(zero_buffer), &off) != sizeof(zero_buffer)) {
-		pr_err("clear_dynamic_manager write null bytes failed.\n");
-	} else {
-		pr_info("Dynamic sign config file cleared successfully\n");
+		pr_err(DM_LOG_PREFIX "clear config write failed\n");
	}
	filp_close(fp, 0);
}
static bool clear_dynamic_manager_file(void)
{
	return ksu_queue_work(&clear_dynamic_manager_work);
}
int ksu_handle_dynamic_manager(struct dynamic_manager_user_config *config)
{
	unsigned long flags;
	int ret = 0;
	int i;
	if (!config) {
		return -EINVAL;
	}
	switch (config->operation) {
	case DYNAMIC_MANAGER_OP_SET:
		if (config->size < 0x100 || config->size > 0x1000) {
-			pr_err("invalid size: 0x%x\n", config->size);
+			pr_err(DM_LOG_PREFIX "invalid size: 0x%x\n", config->size);
			return -EINVAL;
		}
		if (strlen(config->hash) != 64) {
-			pr_err("invalid hash length: %zu\n", strlen(config->hash));
+			pr_err(DM_LOG_PREFIX "invalid hash length: %zu\n", strlen(config->hash));
			return -EINVAL;
		}
		// Validate hash format
		for (i = 0; i < 64; i++) {
			char c = config->hash[i];
			if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))) {
-				pr_err("invalid hash character at position %d: %c\n", i, c);
+				pr_err(DM_LOG_PREFIX "invalid hash character at position %d: %c\n", i, c);
				return -EINVAL;
			}
		}
+		clear_dynamic_manager();
		spin_lock_irqsave(&dynamic_manager_lock, flags);
		dynamic_manager.size = config->size;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
		strscpy(dynamic_manager.hash, config->hash, sizeof(dynamic_manager.hash));
#else
		strlcpy(dynamic_manager.hash, config->hash, sizeof(dynamic_manager.hash));
#endif
		dynamic_manager.is_set = 1;
		spin_unlock_irqrestore(&dynamic_manager_lock, flags);
		persistent_dynamic_manager();
-		pr_info("dynamic manager updated: size=0x%x, hash=%.16s... (multi-manager enabled)\n",
+		pr_info(DM_LOG_PREFIX "config updated and activated: size=0x%x, hash=%.16s...\n",
			config->size, config->hash);
		break;
	case DYNAMIC_MANAGER_OP_GET:
		spin_lock_irqsave(&dynamic_manager_lock, flags);
-		if (dynamic_manager.is_set) {
+		if (dynamic_manager.is_set && dynamic_manager.size > 0 && strlen(dynamic_manager.hash) == 64) {
			config->size = dynamic_manager.size;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
			strscpy(config->hash, dynamic_manager.hash, sizeof(config->hash));
#else
			strlcpy(config->hash, dynamic_manager.hash, sizeof(config->hash));
#endif
			ret = 0;
		} else {
			ret = -ENODATA;
		}
		spin_unlock_irqrestore(&dynamic_manager_lock, flags);
		break;
	case DYNAMIC_MANAGER_OP_CLEAR:
-		spin_lock_irqsave(&dynamic_manager_lock, flags);
-		dynamic_manager.size = 0x300;
-		strcpy(dynamic_manager.hash, "0000000000000000000000000000000000000000000000000000000000000000");
-		dynamic_manager.is_set = 0;
-		spin_unlock_irqrestore(&dynamic_manager_lock, flags);
-		// Clear only dynamic managers, preserve default manager
-		clear_dynamic_manager();
-		// Clear file using the same method as save
+		// Improvement: invalidate all dynamic managers first
+		pr_info(DM_LOG_PREFIX "clearing dynamic manager config\n");
+		clear_dynamic_manager();
+		spin_lock_irqsave(&dynamic_manager_lock, flags);
+		dynamic_manager.size = 0;
+		memset(dynamic_manager.hash, 0, sizeof(dynamic_manager.hash));
+		dynamic_manager.is_set = 0;
+		spin_unlock_irqrestore(&dynamic_manager_lock, flags);
+		// Clear file
		clear_dynamic_manager_file();
-		pr_info("Dynamic sign config cleared (multi-manager disabled)\n");
+		pr_info(DM_LOG_PREFIX "config cleared\n");
		break;
	default:
-		pr_err("Invalid dynamic manager operation: %d\n", config->operation);
+		pr_err(DM_LOG_PREFIX "invalid operation: %d\n", config->operation);
		return -EINVAL;
	}
	return ret;
}
bool ksu_load_dynamic_manager(void)
{
	return ksu_queue_work(&load_dynamic_manager_work);
}
void ksu_dynamic_manager_init(void)
{
	int i;
	INIT_WORK(&save_dynamic_manager_work, do_save_dynamic_manager);
	INIT_WORK(&load_dynamic_manager_work, do_load_dynamic_manager);
	INIT_WORK(&clear_dynamic_manager_work, do_clear_dynamic_manager);
	// Initialize manager slots
	for (i = 0; i < MAX_MANAGERS; i++) {
		active_managers[i].is_active = false;
	}
	ksu_load_dynamic_manager();
-	pr_info("Dynamic sign initialized with conditional multi-manager support\n");
+	pr_info(DM_LOG_PREFIX "init\n");
}
void ksu_dynamic_manager_exit(void)
{
	clear_dynamic_manager();
	// Save current config before exit
	do_save_dynamic_manager(NULL);
-	pr_info("Dynamic sign exited with persistent storage\n");
+	pr_info(DM_LOG_PREFIX "exited\n");
}
// Get dynamic manager configuration for signature verification
bool ksu_get_dynamic_manager_config(unsigned int *size, const char **hash)
{
	unsigned long flags;
	bool valid = false;
	spin_lock_irqsave(&dynamic_manager_lock, flags);
-	if (dynamic_manager.is_set) {
+	if (dynamic_manager.is_set && dynamic_manager.size > 0 && strlen(dynamic_manager.hash) == 64) {
		if (size) *size = dynamic_manager.size;
		if (hash) *hash = dynamic_manager.hash;
		valid = true;
	}
	spin_unlock_irqrestore(&dynamic_manager_lock, flags);
	return valid;
}
int ksu_get_active_managers(struct manager_list_info *info)
{
unsigned long flags;
int i, count = 0;
if (!info) {
return -EINVAL;
}
memset(info, 0, sizeof(*info));
if (ksu_manager_uid != KSU_INVALID_UID && count < ARRAY_SIZE(info->managers)) {
info->managers[count].uid = ksu_manager_uid;
info->managers[count].signature_index = 0;
count++;
pr_info(DM_LOG_PREFIX "added traditional manager: uid=%d\n", ksu_manager_uid);
}
if (ksu_is_dynamic_manager_enabled() && count < ARRAY_SIZE(info->managers)) {
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS && count < ARRAY_SIZE(info->managers); i++) {
if (active_managers[i].is_active) {
info->managers[count].uid = active_managers[i].uid;
info->managers[count].signature_index = active_managers[i].signature_index;
count++;
pr_info(DM_LOG_PREFIX "added dynamic manager: uid=%d, signature_index=%d\n",
active_managers[i].uid, active_managers[i].signature_index);
}
}
spin_unlock_irqrestore(&managers_lock, flags);
}
info->count = count;
pr_info(DM_LOG_PREFIX "total active managers: %d\n", count);
return 0;
}
bool ksu_has_dynamic_managers(void)
{
unsigned long flags;
bool has_managers = false;
int i;
if (!ksu_is_dynamic_manager_enabled()) {
return false;
}
spin_lock_irqsave(&managers_lock, flags);
for (i = 0; i < MAX_MANAGERS; i++) {
if (active_managers[i].is_active) {
has_managers = true;
break;
}
}
spin_unlock_irqrestore(&managers_lock, flags);
return has_managers;
} }
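Illustrative caller for the enumeration API above; it assumes struct manager_list_info exposes count and managers[] exactly as ksu_get_active_managers() fills them in, and is not code from this tree.

static void example_dump_managers(void)
{
	struct manager_list_info info;
	int i;

	if (ksu_get_active_managers(&info))
		return;

	for (i = 0; i < info.count; i++)
		pr_info(DM_LOG_PREFIX "manager[%d]: uid=%d, signature_index=%d\n",
			i, info.managers[i].uid,
			info.managers[i].signature_index);
}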


@@ -9,43 +9,41 @@
#define KERNEL_SU_DYNAMIC_MANAGER "/data/adb/ksu/.dynamic_manager"
#define DYNAMIC_SIGN_INDEX 100
+#define DM_LOG_PREFIX "[Dynamic Manager] "
struct dynamic_sign_key {
	unsigned int size;
	const char *hash;
};
#define DYNAMIC_SIGN_DEFAULT_CONFIG { \
-	.size = 0x300, \
-	.hash = "0000000000000000000000000000000000000000000000000000000000000000" \
+	.size = 0, \
+	.hash = "" \
}
struct dynamic_manager_config {
	unsigned int size;
	char hash[65];
	int is_set;
};
struct manager_info {
	uid_t uid;
	int signature_index;
	bool is_active;
};
-// Dynamic sign operations
-void ksu_dynamic_manager_init(void);
-void ksu_dynamic_manager_exit(void);
-int ksu_handle_dynamic_manager(struct dynamic_manager_user_config *config);
-bool ksu_load_dynamic_manager(void);
bool ksu_is_dynamic_manager_enabled(void);
-// Multi-manager operations
void ksu_add_manager(uid_t uid, int signature_index);
void ksu_remove_manager(uid_t uid);
bool ksu_is_any_manager(uid_t uid);
int ksu_get_manager_signature_index(uid_t uid);
-int ksu_get_active_managers(struct manager_list_info *info);
+int ksu_handle_dynamic_manager(struct dynamic_manager_user_config *config);
+bool ksu_load_dynamic_manager(void);
-// Configuration access for signature verification
+void ksu_dynamic_manager_init(void);
+void ksu_dynamic_manager_exit(void);
bool ksu_get_dynamic_manager_config(unsigned int *size, const char **hash);
+int ksu_get_active_managers(struct manager_list_info *info);
+bool ksu_has_dynamic_managers(void);
#endif
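Illustrative in-kernel caller for the header above. The field names of struct dynamic_manager_user_config (operation, size, hash) and the DYNAMIC_MANAGER_OP_SET constant follow their use in dynamic_manager.c; the hash value is a placeholder, not a real manager signature.

#include <linux/string.h>

static int example_set_dynamic_manager(void)
{
	struct dynamic_manager_user_config cfg = {
		.operation = DYNAMIC_MANAGER_OP_SET,
		.size = 0x300, /* example signing-block size */
	};

	/* must be exactly 64 lowercase hex characters */
	strscpy(cfg.hash,
		"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
		sizeof(cfg.hash));

	return ksu_handle_dynamic_manager(&cfg);
}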


@@ -1,176 +0,0 @@
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include <linux/mutex.h>
static const struct ksu_feature_handler *feature_handlers[KSU_FEATURE_MAX];
static DEFINE_MUTEX(feature_mutex);
int ksu_register_feature_handler(const struct ksu_feature_handler *handler)
{
if (!handler) {
pr_err("feature: register handler is NULL\n");
return -EINVAL;
}
if (handler->feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", handler->feature_id);
return -EINVAL;
}
if (!handler->get_handler && !handler->set_handler) {
pr_err("feature: no handler provided for feature %u\n",
handler->feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
if (feature_handlers[handler->feature_id]) {
pr_warn("feature: handler for %u already registered, overwriting\n",
handler->feature_id);
}
feature_handlers[handler->feature_id] = handler;
pr_info("feature: registered handler for %s (id=%u)\n",
handler->name ? handler->name : "unknown", handler->feature_id);
mutex_unlock(&feature_mutex);
return 0;
}
int ksu_unregister_feature_handler(u32 feature_id)
{
int ret = 0;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
if (!feature_handlers[feature_id]) {
pr_warn("feature: no handler registered for %u\n", feature_id);
ret = -ENOENT;
goto out;
}
feature_handlers[feature_id] = NULL;
pr_info("feature: unregistered handler for id=%u\n", feature_id);
out:
mutex_unlock(&feature_mutex);
return ret;
}
int ksu_get_feature(u32 feature_id, u64 *value, bool *supported)
{
int ret = 0;
const struct ksu_feature_handler *handler;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
if (!value || !supported) {
pr_err("feature: invalid parameters\n");
return -EINVAL;
}
mutex_lock(&feature_mutex);
handler = feature_handlers[feature_id];
if (!handler) {
*supported = false;
*value = 0;
pr_debug("feature: feature %u not supported\n", feature_id);
goto out;
}
*supported = true;
if (!handler->get_handler) {
pr_warn("feature: no get_handler for feature %u\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
ret = handler->get_handler(value);
if (ret) {
pr_err("feature: get_handler for %u failed: %d\n", feature_id,
ret);
}
out:
mutex_unlock(&feature_mutex);
return ret;
}
int ksu_set_feature(u32 feature_id, u64 value)
{
int ret = 0;
const struct ksu_feature_handler *handler;
if (feature_id >= KSU_FEATURE_MAX) {
pr_err("feature: invalid feature_id %u\n", feature_id);
return -EINVAL;
}
mutex_lock(&feature_mutex);
handler = feature_handlers[feature_id];
if (!handler) {
pr_err("feature: feature %u not registered\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
if (!handler->set_handler) {
pr_warn("feature: no set_handler for feature %u\n", feature_id);
ret = -EOPNOTSUPP;
goto out;
}
ret = handler->set_handler(value);
if (ret) {
pr_err("feature: set_handler for %u failed: %d\n", feature_id,
ret);
}
out:
mutex_unlock(&feature_mutex);
return ret;
}
void ksu_feature_init(void)
{
int i;
for (i = 0; i < KSU_FEATURE_MAX; i++) {
feature_handlers[i] = NULL;
}
pr_info("feature: feature management initialized\n");
}
void ksu_feature_exit(void)
{
int i;
mutex_lock(&feature_mutex);
for (i = 0; i < KSU_FEATURE_MAX; i++) {
feature_handlers[i] = NULL;
}
mutex_unlock(&feature_mutex);
pr_info("feature: feature management cleaned up\n");
}


@@ -1,37 +0,0 @@
#ifndef __KSU_H_FEATURE
#define __KSU_H_FEATURE
#include <linux/types.h>
enum ksu_feature_id {
KSU_FEATURE_SU_COMPAT = 0,
KSU_FEATURE_KERNEL_UMOUNT = 1,
KSU_FEATURE_ENHANCED_SECURITY = 2,
KSU_FEATURE_SULOG = 3,
KSU_FEATURE_MAX
};
typedef int (*ksu_feature_get_t)(u64 *value);
typedef int (*ksu_feature_set_t)(u64 value);
struct ksu_feature_handler {
u32 feature_id;
const char *name;
ksu_feature_get_t get_handler;
ksu_feature_set_t set_handler;
};
int ksu_register_feature_handler(const struct ksu_feature_handler *handler);
int ksu_unregister_feature_handler(u32 feature_id);
int ksu_get_feature(u32 feature_id, u64 *value, bool *supported);
int ksu_set_feature(u32 feature_id, u64 value);
void ksu_feature_init(void);
void ksu_feature_exit(void);
#endif // __KSU_H_FEATURE
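Illustrative only: how a consumer would have plugged into the (now removed) feature framework declared above, using an in-memory flag as the backing state; the handler name and state variable are made up for this sketch.

#include <linux/types.h>

static u64 example_su_compat_state;

static int example_get_su_compat(u64 *value)
{
	*value = example_su_compat_state;
	return 0;
}

static int example_set_su_compat(u64 value)
{
	example_su_compat_state = !!value;
	return 0;
}

static const struct ksu_feature_handler example_su_compat_handler = {
	.feature_id = KSU_FEATURE_SU_COMPAT,
	.name = "su_compat",
	.get_handler = example_get_su_compat,
	.set_handler = example_set_su_compat,
};

/* registered from an init path, e.g.:
 * ksu_register_feature_handler(&example_su_compat_handler);
 */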


@@ -1,402 +0,0 @@
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/aio.h> // kernel 3.18
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "klog.h" // IWYU pragma: keep
#include "selinux/selinux.h"
#include "file_wrapper.h"
static loff_t ksu_wrapper_llseek(struct file *fp, loff_t off, int flags) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->llseek(data->orig, off, flags);
}
static ssize_t ksu_wrapper_read(struct file *fp, char __user *ptr, size_t sz, loff_t *off) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->read(orig, ptr, sz, off);
}
static ssize_t ksu_wrapper_write(struct file *fp, const char __user *ptr, size_t sz, loff_t *off) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->write(orig, ptr, sz, off);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
static ssize_t ksu_wrapper_read_iter(struct kiocb *iocb, struct iov_iter *iovi) {
struct ksu_file_wrapper* data = iocb->ki_filp->private_data;
struct file* orig = data->orig;
iocb->ki_filp = orig;
return orig->f_op->read_iter(iocb, iovi);
}
static ssize_t ksu_wrapper_write_iter(struct kiocb *iocb, struct iov_iter *iovi) {
struct ksu_file_wrapper* data = iocb->ki_filp->private_data;
struct file* orig = data->orig;
iocb->ki_filp = orig;
return orig->f_op->write_iter(iocb, iovi);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
static int ksu_wrapper_iopoll(struct kiocb *kiocb, struct io_comp_batch* icb, unsigned int v) {
struct ksu_file_wrapper* data = kiocb->ki_filp->private_data;
struct file* orig = data->orig;
kiocb->ki_filp = orig;
return orig->f_op->iopoll(kiocb, icb, v);
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
static int ksu_wrapper_iopoll(struct kiocb *kiocb, bool spin) {
struct ksu_file_wrapper* data = kiocb->ki_filp->private_data;
struct file* orig = data->orig;
kiocb->ki_filp = orig;
return orig->f_op->iopoll(kiocb, spin);
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && (LINUX_VERSION_CODE > KERNEL_VERSION(3, 11, 0) || defined(KSU_HAS_ITERATE_DIR))
static int ksu_wrapper_iterate (struct file *fp, struct dir_context *dc) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->iterate(orig, dc);
}
#endif
// int (*readdir) (struct file *, void *, filldir_t);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(KSU_HAS_ITERATE_DIR)
static int ksu_wrapper_readdir(struct file *fp, void *ptr, filldir_t filler) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->readdir(orig, ptr, filler);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
static int ksu_wrapper_iterate_shared(struct file *fp, struct dir_context *dc) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->iterate_shared(orig, dc);
}
#endif
// typedef unsigned __bitwise __poll_t;
static unsigned __bitwise ksu_wrapper_poll(struct file *fp, struct poll_table_struct *pts) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->poll(orig, pts);
}
static long ksu_wrapper_unlocked_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->unlocked_ioctl(orig, cmd, arg);
}
static long ksu_wrapper_compat_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->compat_ioctl(orig, cmd, arg);
}
static int ksu_wrapper_mmap(struct file *fp, struct vm_area_struct * vma) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->mmap(orig, vma);
}
// static unsigned long mmap_supported_flags {}
static int ksu_wrapper_open(struct inode *ino, struct file *fp) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
struct inode *orig_ino = file_inode(orig);
return orig->f_op->open(orig_ino, orig);
}
static int ksu_wrapper_flush(struct file *fp, fl_owner_t id) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->flush(orig, id);
}
static int ksu_wrapper_fsync(struct file *fp, loff_t off1, loff_t off2, int datasync) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->fsync(orig, off1, off2, datasync);
}
static int ksu_wrapper_fasync(int arg, struct file *fp, int arg2) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->fasync(arg, orig, arg2);
}
static int ksu_wrapper_lock(struct file *fp, int arg1, struct file_lock *fl) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
return orig->f_op->lock(orig, arg1, fl);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
static ssize_t ksu_wrapper_sendpage(struct file *fp, struct page *pg, int arg1, size_t sz, loff_t *off, int arg2) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->sendpage) {
return orig->f_op->sendpage(orig, pg, arg1, sz, off, arg2);
}
return -EINVAL;
}
#endif
static unsigned long ksu_wrapper_get_unmapped_area(struct file *fp, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->get_unmapped_area) {
return orig->f_op->get_unmapped_area(orig, arg1, arg2, arg3, arg4);
}
return -EINVAL;
}
// static int ksu_wrapper_check_flags(int arg) {}
static int ksu_wrapper_flock(struct file *fp, int arg1, struct file_lock *fl) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->flock) {
return orig->f_op->flock(orig, arg1, fl);
}
return -EINVAL;
}
static ssize_t ksu_wrapper_splice_write(struct pipe_inode_info * pii, struct file *fp, loff_t *off, size_t sz, unsigned int arg1) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_write) {
return orig->f_op->splice_write(pii, orig, off, sz, arg1);
}
return -EINVAL;
}
static ssize_t ksu_wrapper_splice_read(struct file *fp, loff_t *off, struct pipe_inode_info *pii, size_t sz, unsigned int arg1) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_read) {
return orig->f_op->splice_read(orig, off, pii, sz, arg1);
}
return -EINVAL;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
void ksu_wrapper_splice_eof(struct file *fp) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->splice_eof) {
return orig->f_op->splice_eof(orig);
}
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)
static int ksu_wrapper_setlease(struct file *fp, int arg1, struct file_lease **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
static int ksu_wrapper_setlease(struct file *fp, int arg1, struct file_lock **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) // int (*setlease)(struct file *, long, struct file_lock **, void **);
static int ksu_wrapper_setlease(struct file *fp, long arg1, struct file_lock **fl, void **p) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl, p);
}
return -EINVAL;
}
#else // int (*setlease)(struct file *, long, struct file_lock **);
static int ksu_wrapper_setlease(struct file *fp, long arg1, struct file_lock **fl) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->setlease) {
return orig->f_op->setlease(orig, arg1, fl);
}
return -EINVAL;
}
#endif
static long ksu_wrapper_fallocate(struct file *fp, int mode, loff_t offset, loff_t len) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->fallocate) {
return orig->f_op->fallocate(orig, mode, offset, len);
}
return -EINVAL;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
static void ksu_wrapper_show_fdinfo(struct seq_file *m, struct file *f) {
struct ksu_file_wrapper* data = f->private_data;
struct file* orig = data->orig;
if (orig->f_op->show_fdinfo) {
orig->f_op->show_fdinfo(m, orig);
}
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
static int ksu_wrapper_show_fdinfo(struct seq_file *m, struct file *f) {
struct ksu_file_wrapper* data = f->private_data;
struct file* orig = data->orig;
if (orig->f_op->show_fdinfo) {
orig->f_op->show_fdinfo(m, orig);
}
return -EINVAL;
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
// https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/fs/read_write.c;l=1593-1606;drc=398da7defe218d3e51b0f3bdff75147e28125b60
static ssize_t ksu_wrapper_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out,
loff_t pos_out, size_t len, unsigned int flags) {
struct ksu_file_wrapper* data = file_out->private_data;
struct file* orig = data->orig;
return orig->f_op->copy_file_range(file_in, pos_in, orig, pos_out, len, flags);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
// no REMAP_FILE_DEDUP: use file_in
// https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/fs/read_write.c;l=1598-1599;drc=398da7defe218d3e51b0f3bdff75147e28125b60
// https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/fs/remap_range.c;l=403-404;drc=398da7defe218d3e51b0f3bdff75147e28125b60
// REMAP_FILE_DEDUP: use file_out
// https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/fs/remap_range.c;l=483-484;drc=398da7defe218d3e51b0f3bdff75147e28125b60
static loff_t ksu_wrapper_remap_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags) {
if (remap_flags & REMAP_FILE_DEDUP) {
struct ksu_file_wrapper* data = file_out->private_data;
struct file* orig = data->orig;
return orig->f_op->remap_file_range(file_in, pos_in, orig, pos_out, len, remap_flags);
} else {
struct ksu_file_wrapper* data = file_in->private_data;
struct file* orig = data->orig;
return orig->f_op->remap_file_range(orig, pos_in, file_out, pos_out, len, remap_flags);
}
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
static int ksu_wrapper_fadvise(struct file *fp, loff_t off1, loff_t off2, int flags) {
struct ksu_file_wrapper* data = fp->private_data;
struct file* orig = data->orig;
if (orig->f_op->fadvise) {
return orig->f_op->fadvise(orig, off1, off2, flags);
}
return -EINVAL;
}
#endif
static int ksu_wrapper_release(struct inode *inode, struct file *filp) {
ksu_delete_file_wrapper(filp->private_data);
return 0;
}
struct ksu_file_wrapper* ksu_create_file_wrapper(struct file* fp) {
struct ksu_file_wrapper* p = kcalloc(1, sizeof(struct ksu_file_wrapper), GFP_KERNEL);
if (!p) {
return NULL;
}
get_file(fp);
p->orig = fp;
p->ops.owner = THIS_MODULE;
p->ops.llseek = fp->f_op->llseek ? ksu_wrapper_llseek : NULL;
p->ops.read = fp->f_op->read ? ksu_wrapper_read : NULL;
p->ops.write = fp->f_op->write ? ksu_wrapper_write : NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
p->ops.read_iter = fp->f_op->read_iter ? ksu_wrapper_read_iter : NULL;
p->ops.write_iter = fp->f_op->write_iter ? ksu_wrapper_write_iter : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
p->ops.iopoll = fp->f_op->iopoll ? ksu_wrapper_iopoll : NULL;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && (LINUX_VERSION_CODE > KERNEL_VERSION(3, 11, 0) || defined(KSU_HAS_ITERATE_DIR))
p->ops.iterate = fp->f_op->iterate ? ksu_wrapper_iterate : NULL;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(KSU_HAS_ITERATE_DIR)
p->ops.readdir = fp->f_op->readdir ? ksu_wrapper_readdir : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
p->ops.iterate_shared = fp->f_op->iterate_shared ? ksu_wrapper_iterate_shared : NULL;
#endif
p->ops.poll = fp->f_op->poll ? ksu_wrapper_poll : NULL;
p->ops.unlocked_ioctl = fp->f_op->unlocked_ioctl ? ksu_wrapper_unlocked_ioctl : NULL;
p->ops.compat_ioctl = fp->f_op->compat_ioctl ? ksu_wrapper_compat_ioctl : NULL;
p->ops.mmap = fp->f_op->mmap ? ksu_wrapper_mmap : NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)
p->ops.fop_flags = fp->f_op->fop_flags;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
p->ops.mmap_supported_flags = fp->f_op->mmap_supported_flags;
#endif
p->ops.open = fp->f_op->open ? ksu_wrapper_open : NULL;
p->ops.flush = fp->f_op->flush ? ksu_wrapper_flush : NULL;
p->ops.release = ksu_wrapper_release;
p->ops.fsync = fp->f_op->fsync ? ksu_wrapper_fsync : NULL;
p->ops.fasync = fp->f_op->fasync ? ksu_wrapper_fasync : NULL;
p->ops.lock = fp->f_op->lock ? ksu_wrapper_lock : NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
p->ops.sendpage = fp->f_op->sendpage ? ksu_wrapper_sendpage : NULL;
#endif
p->ops.get_unmapped_area = fp->f_op->get_unmapped_area ? ksu_wrapper_get_unmapped_area : NULL;
p->ops.check_flags = fp->f_op->check_flags;
p->ops.flock = fp->f_op->flock ? ksu_wrapper_flock : NULL;
p->ops.splice_write = fp->f_op->splice_write ? ksu_wrapper_splice_write : NULL;
p->ops.splice_read = fp->f_op->splice_read ? ksu_wrapper_splice_read : NULL;
p->ops.setlease = fp->f_op->setlease ? ksu_wrapper_setlease : NULL;
p->ops.fallocate = fp->f_op->fallocate ? ksu_wrapper_fallocate : NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
p->ops.show_fdinfo = fp->f_op->show_fdinfo ? ksu_wrapper_show_fdinfo : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
p->ops.copy_file_range = fp->f_op->copy_file_range ? ksu_wrapper_copy_file_range : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
p->ops.remap_file_range = fp->f_op->remap_file_range ? ksu_wrapper_remap_file_range : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
p->ops.fadvise = fp->f_op->fadvise ? ksu_wrapper_fadvise : NULL;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
p->ops.splice_eof = fp->f_op->splice_eof ? ksu_wrapper_splice_eof : NULL;
#endif
return p;
}
void ksu_delete_file_wrapper(struct ksu_file_wrapper* data) {
fput((struct file*) data->orig);
kfree(data);
}
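Not part of the patch: a minimal sketch of one way the wrapper above could be put in front of an existing file, assuming the intended pattern is to hand out a new fd whose file_operations forward to the original. Only ksu_create_file_wrapper()/ksu_delete_file_wrapper() and the orig/ops members come from the code above; the helper name, the anon-inode approach and the "[ksu-wrap]" label are illustrative assumptions.
/* Hypothetical illustration (assumed, not from this patch). */
#include <linux/anon_inodes.h>
#include <linux/file.h>
static int example_install_wrapped_fd(struct file *target)
{
	struct ksu_file_wrapper *w = ksu_create_file_wrapper(target);
	struct file *shadow;
	int fd;
	if (!w)
		return -ENOMEM;
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ksu_delete_file_wrapper(w);
		return fd;
	}
	/* Each ksu_wrapper_* callback looks the wrapper up through
	 * file->private_data and forwards to w->orig. */
	shadow = anon_inode_getfile("[ksu-wrap]", &w->ops, w, O_RDWR);
	if (IS_ERR(shadow)) {
		put_unused_fd(fd);
		ksu_delete_file_wrapper(w);
		return PTR_ERR(shadow);
	}
	fd_install(fd, shadow);
	return fd; /* ksu_wrapper_release() frees w when the fd is closed */
}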

View File

@@ -1,14 +0,0 @@
#ifndef KSU_FILE_WRAPPER_H
#define KSU_FILE_WRAPPER_H
#include <linux/file.h>
#include <linux/fs.h>
struct ksu_file_wrapper {
struct file *orig;
struct file_operations ops;
};
struct ksu_file_wrapper *ksu_create_file_wrapper(struct file *fp);
void ksu_delete_file_wrapper(struct ksu_file_wrapper *data);
#endif // KSU_FILE_WRAPPER_H

kernel/include/ksu_hook.h Normal file
View File

@@ -0,0 +1,28 @@
#ifndef __KSU_H_KSHOOK
#define __KSU_H_KSHOOK
#include <linux/fs.h>
#include <linux/types.h>
// For sucompat
int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
int *flags);
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags);
// For ksud
int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
size_t *count_ptr, loff_t **pos);
// For ksud and sucompat
int ksu_handle_execveat(int *fd, struct filename **filename_ptr, void *argv,
void *envp, int *flags);
// For volume button
int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code,
int *value);
#endif
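Not part of the patch: a hedged sketch of how one of these entry points is consumed by a manually placed hook. The call-site name below is an assumption; only ksu_handle_faccessat() and its pointer-argument convention come from the header above.
/* Hypothetical call site (assumed): a hook in the faccessat path hands
 * its arguments to KernelSU by pointer so the handler may rewrite them
 * (e.g. redirecting a su path) before the real syscall logic runs. */
static long example_hooked_faccessat(int dfd, const char __user *filename,
				     int mode)
{
	int flags = 0;
	ksu_handle_faccessat(&dfd, &filename, &mode, &flags);
	/* ...continue with the original faccessat implementation,
	 * using the possibly-rewritten dfd/filename/mode... */
	return 0;
}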

View File

@@ -1,5 +1,6 @@
#include <linux/version.h> #include <linux/version.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/nsproxy.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/sched/task.h> #include <linux/sched/task.h>
#else #else
@@ -7,19 +8,17 @@
#endif #endif
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "kernel_compat.h" #include "kernel_compat.h" // Add check Huawei Device
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND) defined(CONFIG_IS_HW_HISI) || \
defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
#include <linux/key.h> #include <linux/key.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/cred.h> #include <linux/cred.h>
#include <linux/lsm_hooks.h>
extern int install_session_keyring_to_cred(struct cred *, struct key *);
struct key *init_session_keyring = NULL; struct key *init_session_keyring = NULL;
static int install_session_keyring(struct key *keyring) static inline int install_session_keyring(struct key *keyring)
{ {
struct cred *new; struct cred *new;
int ret; int ret;
@@ -38,24 +37,82 @@ static int install_session_keyring(struct key *keyring)
} }
#endif #endif
extern struct task_struct init_task;
// mnt_ns context switch for environment that android_init->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns, such as WSA
struct ksu_ns_fs_saved {
struct nsproxy *ns;
struct fs_struct *fs;
};
static void ksu_save_ns_fs(struct ksu_ns_fs_saved *ns_fs_saved)
{
ns_fs_saved->ns = current->nsproxy;
ns_fs_saved->fs = current->fs;
}
static void ksu_load_ns_fs(struct ksu_ns_fs_saved *ns_fs_saved)
{
current->nsproxy = ns_fs_saved->ns;
current->fs = ns_fs_saved->fs;
}
static bool android_context_saved_checked = false;
static bool android_context_saved_enabled = false;
static struct ksu_ns_fs_saved android_context_saved;
void ksu_android_ns_fs_check(void)
{
if (android_context_saved_checked)
return;
android_context_saved_checked = true;
task_lock(current);
if (current->nsproxy && current->fs &&
current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns) {
android_context_saved_enabled = true;
pr_info("android context saved enabled due to init mnt_ns(%p) != android mnt_ns(%p)\n",
current->nsproxy->mnt_ns, init_task.nsproxy->mnt_ns);
ksu_save_ns_fs(&android_context_saved);
} else {
pr_info("android context saved disabled\n");
}
task_unlock(current);
}
struct file *ksu_filp_open_compat(const char *filename, int flags, umode_t mode) struct file *ksu_filp_open_compat(const char *filename, int flags, umode_t mode)
{ {
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND) defined(CONFIG_IS_HW_HISI) || \
defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
if (init_session_keyring != NULL && !current_cred()->session_keyring && if (init_session_keyring != NULL && !current_cred()->session_keyring &&
(current->flags & PF_WQ_WORKER)) { (current->flags & PF_WQ_WORKER)) {
pr_info("installing init session keyring for older kernel\n"); pr_info("installing init session keyring for older kernel\n");
install_session_keyring(init_session_keyring); install_session_keyring(init_session_keyring);
} }
#endif #endif
return filp_open(filename, flags, mode); // switch mnt_ns even if current is not wq_worker, to ensure what we open is the correct file in android mnt_ns, rather than user created mnt_ns
struct ksu_ns_fs_saved saved;
if (android_context_saved_enabled) {
pr_info("start switch current nsproxy and fs to android context\n");
task_lock(current);
ksu_save_ns_fs(&saved);
ksu_load_ns_fs(&android_context_saved);
task_unlock(current);
}
struct file *fp = filp_open(filename, flags, mode);
if (android_context_saved_enabled) {
task_lock(current);
ksu_load_ns_fs(&saved);
task_unlock(current);
pr_info("switch current nsproxy and fs back to saved successfully\n");
}
return fp;
} }
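For context, a small sketch of what the namespace switch above buys a caller: a kernel-side consumer can open an Android-visible path without caring whether the current task sits in init's or the Android mount namespace. The helper name and the path below are assumptions; ksu_filp_open_compat(), ksu_kernel_read_compat() and ksu_android_ns_fs_check() are the functions declared in this patch.
/* Hypothetical caller (assumed): ksu_android_ns_fs_check() records the
 * Android task's nsproxy/fs once when it differs from init's; the open
 * below then runs inside that saved context (the WSA case above). */
static int example_read_android_file(char *buf, size_t len)
{
	struct file *fp;
	loff_t pos = 0;
	ssize_t n;
	fp = ksu_filp_open_compat("/data/adb/example.conf", O_RDONLY, 0);
	if (IS_ERR(fp))
		return PTR_ERR(fp);
	n = ksu_kernel_read_compat(fp, buf, len, &pos);
	filp_close(fp, NULL);
	return n < 0 ? (int)n : 0;
}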
ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count, ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count,
loff_t *pos) loff_t *pos)
{ {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || defined(KSU_OPTIONAL_KERNEL_READ)
defined(KSU_OPTIONAL_KERNEL_READ)
return kernel_read(p, buf, count, pos); return kernel_read(p, buf, count, pos);
#else #else
loff_t offset = pos ? *pos : 0; loff_t offset = pos ? *pos : 0;
@@ -70,8 +127,7 @@ ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count,
ssize_t ksu_kernel_write_compat(struct file *p, const void *buf, size_t count, ssize_t ksu_kernel_write_compat(struct file *p, const void *buf, size_t count,
loff_t *pos) loff_t *pos)
{ {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || defined(KSU_OPTIONAL_KERNEL_WRITE)
defined(KSU_OPTIONAL_KERNEL_WRITE)
return kernel_write(p, buf, count, pos); return kernel_write(p, buf, count, pos);
#else #else
loff_t offset = pos ? *pos : 0; loff_t offset = pos ? *pos : 0;
@@ -83,8 +139,7 @@ ssize_t ksu_kernel_write_compat(struct file *p, const void *buf, size_t count,
#endif #endif
} }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) || \ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) || defined(KSU_OPTIONAL_STRNCPY)
defined(KSU_OPTIONAL_STRNCPY)
long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count) long count)
{ {
@@ -123,3 +178,33 @@ long ksu_strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
return ret; return ret;
} }
#endif #endif
long ksu_strncpy_from_user_retry(char *dst, const void __user *unsafe_addr,
long count)
{
long ret;
ret = ksu_strncpy_from_user_nofault(dst, unsafe_addr, count);
if (likely(ret >= 0))
return ret;
// we faulted! fallback to slow path
if (unlikely(!ksu_access_ok(unsafe_addr, count))) {
#ifdef CONFIG_KSU_DEBUG
pr_err("%s: faulted!\n", __func__);
#endif
return -EFAULT;
}
// unlike strncpy_from_user_nofault, allow page faults here; the result is normalized below to the same convention (return value counts the trailing NUL, oversized input is truncated to count)
ret = strncpy_from_user(dst, unsafe_addr, count);
if (ret >= count) {
ret = count;
dst[ret - 1] = '\0';
} else if (likely(ret >= 0)) {
ret++;
}
return ret;
}
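A short usage sketch for the retry helper; the caller name and buffer size are assumptions. What the code above guarantees is the return convention: on success the result counts the copied bytes including the terminating NUL, and over-long strings are truncated to count with a forced NUL.
/* Hypothetical caller (assumed): copy a user-supplied path where the
 * first, nofault attempt may fail and a faulting retry is acceptable. */
static int example_copy_user_path(const char __user *upath)
{
	char path[256];
	long n = ksu_strncpy_from_user_retry(path, upath, sizeof(path));
	if (n <= 0)
		return -EFAULT; /* could not copy from userspace */
	/* n includes the NUL; path is always NUL-terminated here. */
	pr_info("example: got path %s (%ld bytes)\n", path, n);
	return 0;
}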

View File

@@ -3,19 +3,47 @@
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/task_work.h> #include <linux/cred.h>
#include <linux/fdtable.h>
#include "ss/policydb.h" #include "ss/policydb.h"
#include "linux/key.h" #include "linux/key.h"
/**
* list_count_nodes - count the number of nodes in a list
* @head: the head of the list
*
* This function iterates over the list starting from @head and counts
* the number of nodes in the list. It does not modify the list.
*
* Context: Any context. The function is safe to call in any context,
* including interrupt context, as it does not sleep or allocate
* memory.
*
* Return: the number of nodes in the list (excluding the head)
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)
static inline __maybe_unused size_t list_count_nodes(const struct list_head *head)
{
const struct list_head *pos;
size_t count = 0;
if (!head)
return 0;
list_for_each(pos, head)
count++;
return count;
}
#endif
/* /*
* Adapt to Huawei HISI kernel without affecting other kernels , * Adapt to Huawei HISI kernel without affecting other kernels ,
* Huawei Hisi Kernel EBITMAP Enable or Disable Flag , * Huawei Hisi Kernel EBITMAP Enable or Disable Flag ,
* From ss/ebitmap.h * From ss/ebitmap.h
*/ */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && \ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) || \ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
#ifdef HISI_SELINUX_EBITMAP_RO #ifdef HISI_SELINUX_EBITMAP_RO
#define CONFIG_IS_HW_HISI #define CONFIG_IS_HW_HISI
@@ -32,42 +60,28 @@
extern long ksu_strncpy_from_user_nofault(char *dst, extern long ksu_strncpy_from_user_nofault(char *dst,
const void __user *unsafe_addr, const void __user *unsafe_addr,
long count); long count);
extern long ksu_strncpy_from_user_retry(char *dst,
const void __user *unsafe_addr,
long count);
extern struct file *ksu_filp_open_compat(const char *filename, int flags, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
umode_t mode); defined(CONFIG_IS_HW_HISI) || \
extern ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count, defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
loff_t *pos);
extern ssize_t ksu_kernel_write_compat(struct file *p, const void *buf,
size_t count, loff_t *pos);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
extern struct key *init_session_keyring; extern struct key *init_session_keyring;
#endif #endif
extern void ksu_android_ns_fs_check(void);
extern struct file *ksu_filp_open_compat(const char *filename, int flags,
umode_t mode);
extern ssize_t ksu_kernel_read_compat(struct file *p, void *buf, size_t count,
loff_t *pos);
extern ssize_t ksu_kernel_write_compat(struct file *p, const void *buf,
size_t count, loff_t *pos);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
#define ksu_access_ok(addr, size) access_ok(addr, size) #define ksu_access_ok(addr, size) access_ok(addr, size)
#else #else
#define ksu_access_ok(addr, size) access_ok(VERIFY_READ, addr, size) #define ksu_access_ok(addr, size) access_ok(VERIFY_READ, addr, size)
#endif #endif
// Linux >= 5.7
// task_work_add (struct, struct, enum)
// Linux pre-5.7
// task_work_add (struct, struct, bool)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)
#ifndef TWA_RESUME
#define TWA_RESUME true
#endif
#endif
static inline int do_close_fd(unsigned int fd)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
return close_fd(fd);
#else
return __close_fd(current->files, fd);
#endif
}
#endif #endif

View File

@@ -1,224 +0,0 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/version.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#ifndef KSU_HAS_PATH_UMOUNT
#include <linux/syscalls.h>
#endif
#include "manager.h"
#include "kernel_umount.h"
#include "klog.h" // IWYU pragma: keep
#include "kernel_compat.h"
#include "allowlist.h"
#include "selinux/selinux.h"
#include "feature.h"
#include "ksud.h"
#include "sulog.h"
#include "umount_manager.h"
static bool ksu_kernel_umount_enabled = true;
static int kernel_umount_feature_get(u64 *value)
{
*value = ksu_kernel_umount_enabled ? 1 : 0;
return 0;
}
static int kernel_umount_feature_set(u64 value)
{
bool enable = value != 0;
ksu_kernel_umount_enabled = enable;
pr_info("kernel_umount: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler kernel_umount_handler = {
.feature_id = KSU_FEATURE_KERNEL_UMOUNT,
.name = "kernel_umount",
.get_handler = kernel_umount_feature_get,
.set_handler = kernel_umount_feature_set,
};
#ifdef CONFIG_KSU_SUSFS
extern bool susfs_is_log_enabled;
#endif // #ifdef CONFIG_KSU_SUSFS
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) || \
defined(KSU_HAS_PATH_UMOUNT)
extern int path_umount(struct path *path, int flags);
static void ksu_umount_mnt(const char *__never_use_mnt, struct path *path,
int flags)
{
int err = path_umount(path, flags);
if (err) {
pr_info("umount %s failed: %d\n", path->dentry->d_iname, err);
}
}
#else
static void ksu_sys_umount(const char *mnt, int flags)
{
char __user *usermnt = (char __user *)mnt;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
ksys_umount(usermnt, flags);
#else
sys_umount(usermnt, flags); // because the syscall is declared as asmlinkage long sys_##name(...)
#endif
set_fs(old_fs);
}
#define ksu_umount_mnt(mnt, __unused, flags) \
({ \
path_put(__unused); \
ksu_sys_umount(mnt, flags); \
})
#endif
void try_umount(const char *mnt, int flags)
{
struct path path;
int err = kern_path(mnt, 0, &path);
if (err) {
return;
}
if (path.dentry != path.mnt->mnt_root) {
// it is not root mountpoint, maybe umounted by others already.
path_put(&path);
return;
}
ksu_umount_mnt(mnt, &path, flags);
}
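For illustration, a hedged example of a single direct call into try_umount(); the mount point below is an assumption, and in this patch the real callers are the mount_list walk and the umount manager.
/* Hypothetical direct use (assumed): lazily detach one mount point.
 * try_umount() only proceeds if the path is the root of its mount,
 * so an already-unmounted or nested path is silently skipped. */
static void example_umount_one(void)
{
	try_umount("/debug_ramdisk", MNT_DETACH);
}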
struct umount_tw {
struct callback_head cb;
const struct cred *old_cred;
};
static void umount_tw_func(struct callback_head *cb)
{
struct umount_tw *tw = container_of(cb, struct umount_tw, cb);
const struct cred *saved = NULL;
if (tw->old_cred) {
saved = override_creds(tw->old_cred);
}
struct mount_entry *entry;
down_read(&mount_list_lock);
list_for_each_entry(entry, &mount_list, list) {
pr_info("%s: unmounting: %s flags 0x%x\n", __func__, entry->umountable, entry->flags);
try_umount(entry->umountable, entry->flags);
}
up_read(&mount_list_lock);
ksu_umount_manager_execute_all(tw->old_cred);
if (saved)
revert_creds(saved);
if (tw->old_cred)
put_cred(tw->old_cred);
kfree(tw);
}
int ksu_handle_umount(uid_t old_uid, uid_t new_uid)
{
struct umount_tw *tw;
// if there isn't any module mounted, just ignore it!
if (!ksu_module_mounted) {
return 0;
}
if (!ksu_kernel_umount_enabled) {
return 0;
}
#ifndef CONFIG_KSU_SUSFS
// There are 5 scenarios:
// 1. Normal app: zygote -> appuid
// 2. Isolated process forked from zygote: zygote -> isolated_process
// 3. App zygote forked from zygote: zygote -> appuid
// 4. Isolated process forked from app zygote: appuid -> isolated_process (already handled by 3)
// 5. Isolated process forked from webview zygote (no need to handle, app cannot run custom code)
if (!is_appuid(new_uid) && !is_isolated_process(new_uid)) {
return 0;
}
if (!ksu_uid_should_umount(new_uid) && !is_isolated_process(new_uid)) {
return 0;
}
// check old process's selinux context, if it is not zygote, ignore it!
// because some su apps may setuid to untrusted_app but they are in global mount namespace
// when we umount for such process, that is a disaster!
// also handle case 4 and 5
bool is_zygote_child = is_zygote(get_current_cred());
if (!is_zygote_child) {
pr_info("handle umount ignore non zygote child: %d\n", current->pid);
return 0;
}
#endif // #ifndef CONFIG_KSU_SUSFS
// umount the target mnt
pr_info("handle umount for uid: %d, pid: %d\n", new_uid, current->pid);
#if __SULOG_GATE
ksu_sulog_report_syscall(new_uid, NULL, "setuid", NULL);
#endif
tw = kzalloc(sizeof(*tw), GFP_ATOMIC);
if (!tw)
return 0;
tw->old_cred = get_current_cred();
tw->cb.func = umount_tw_func;
int err = task_work_add(current, &tw->cb, TWA_RESUME);
if (err) {
if (tw->old_cred) {
put_cred(tw->old_cred);
}
kfree(tw);
pr_warn("unmount add task_work failed\n");
}
return 0;
}
void ksu_kernel_umount_init(void)
{
int rc = 0;
rc = ksu_umount_manager_init();
if (rc) {
pr_err("Failed to initialize umount manager: %d\n", rc);
}
if (ksu_register_feature_handler(&kernel_umount_handler)) {
pr_err("Failed to register kernel_umount feature handler\n");
}
}
void ksu_kernel_umount_exit(void)
{
ksu_umount_manager_exit();
ksu_unregister_feature_handler(KSU_FEATURE_KERNEL_UMOUNT);
}

View File

@@ -1,25 +0,0 @@
#ifndef __KSU_H_KERNEL_UMOUNT
#define __KSU_H_KERNEL_UMOUNT
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rwsem.h>
void ksu_kernel_umount_init(void);
void ksu_kernel_umount_exit(void);
// Handler function to be called from setresuid hook
int ksu_handle_umount(uid_t old_uid, uid_t new_uid);
// for the umount list
struct mount_entry {
char *umountable;
unsigned int flags;
struct list_head list;
};
extern struct list_head mount_list;
extern struct rw_semaphore mount_list_lock;
void try_umount(const char *mnt, int flags);
#endif

View File

@@ -4,6 +4,3 @@ obj-y += super_access.o
ccflags-y += -Wno-implicit-function-declaration -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat ccflags-y += -Wno-implicit-function-declaration -Wno-strict-prototypes -Wno-int-conversion -Wno-gcc-compat
ccflags-y += -Wno-declaration-after-statement -Wno-unused-function ccflags-y += -Wno-declaration-after-statement -Wno-unused-function
ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h

View File

@@ -13,7 +13,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <asm/elf.h> #include <asm/elf.h> /* ARM64 relocation type definitions */
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/string.h> #include <linux/string.h>
@@ -29,72 +29,74 @@
#include "../allowlist.h" #include "../allowlist.h"
#include "../manager.h" #include "../manager.h"
static int sukisu_is_su_allow_uid(uid_t uid) unsigned long sukisu_compact_find_symbol(const char* name);
{
return ksu_is_allow_uid_for_current(uid) ? 1 : 0; // ======================================================================
// compatibility functions for KPM
static
int sukisu_is_su_allow_uid(uid_t uid) {
return ksu_is_allow_uid(uid) ? 1 : 0;
} }
static int sukisu_get_ap_mod_exclude(uid_t uid) static
{ int sukisu_get_ap_mod_exclude(uid_t uid) {
return 0; /* Not supported */ // Not supported
return 0;
} }
static int sukisu_is_uid_should_umount(uid_t uid) static
{ int sukisu_is_uid_should_umount(uid_t uid) {
return ksu_uid_should_umount(uid) ? 1 : 0; return ksu_uid_should_umount(uid) ? 1 : 0;
} }
static int sukisu_is_current_uid_manager(void) static
{ int sukisu_is_current_uid_manager() {
return is_manager(); return is_manager();
} }
static uid_t sukisu_get_manager_uid(void) static
{ uid_t sukisu_get_manager_uid() {
return ksu_manager_uid; return ksu_manager_uid;
} }
static void sukisu_set_manager_uid(uid_t uid, int force) // ======================================================================
{
if (force || ksu_manager_uid == -1)
ksu_manager_uid = uid;
}
struct CompactAddressSymbol { struct CompactAddressSymbol {
const char *symbol_name; const char* symbol_name;
void *addr; void* addr;
}; };
unsigned long sukisu_compact_find_symbol(const char *name); static struct CompactAddressSymbol address_symbol [] = {
{ "kallsyms_lookup_name", &kallsyms_lookup_name },
static struct CompactAddressSymbol address_symbol[] = { { "compact_find_symbol", &sukisu_compact_find_symbol },
{ "kallsyms_lookup_name", &kallsyms_lookup_name }, { "is_run_in_sukisu_ultra", (void*)1 },
{ "compact_find_symbol", &sukisu_compact_find_symbol }, { "is_su_allow_uid", &sukisu_is_su_allow_uid },
{ "is_run_in_sukisu_ultra", (void *)1 }, { "get_ap_mod_exclude", &sukisu_get_ap_mod_exclude },
{ "is_su_allow_uid", &sukisu_is_su_allow_uid }, { "is_uid_should_umount", &sukisu_is_uid_should_umount },
{ "get_ap_mod_exclude", &sukisu_get_ap_mod_exclude }, { "is_current_uid_manager", &sukisu_is_current_uid_manager },
{ "is_uid_should_umount", &sukisu_is_uid_should_umount }, { "get_manager_uid", &sukisu_get_manager_uid }
{ "is_current_uid_manager", &sukisu_is_current_uid_manager },
{ "get_manager_uid", &sukisu_get_manager_uid },
{ "sukisu_set_manager_uid", &sukisu_set_manager_uid }
}; };
unsigned long sukisu_compact_find_symbol(const char* name) unsigned long sukisu_compact_find_symbol(const char* name) {
{ int i;
int i; unsigned long addr;
unsigned long addr;
for (i = 0; i < (sizeof(address_symbol) / sizeof(struct CompactAddressSymbol)); i++) { // look it up in our own address table first
struct CompactAddressSymbol *symbol = &address_symbol[i]; for(i = 0; i < (sizeof(address_symbol) / sizeof(struct CompactAddressSymbol)); i++) {
struct CompactAddressSymbol* symbol = &address_symbol[i];
if(strcmp(name, symbol->symbol_name) == 0) {
return (unsigned long) symbol->addr;
}
}
if (strcmp(name, symbol->symbol_name) == 0) // then ask the kernel via kallsyms
return (unsigned long)symbol->addr; addr = kallsyms_lookup_name(name);
} if(addr) {
return addr;
}
addr = kallsyms_lookup_name(name); return 0;
if (addr)
return addr;
return 0;
} }
EXPORT_SYMBOL(sukisu_compact_find_symbol); EXPORT_SYMBOL(sukisu_compact_find_symbol);
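A sketch, from the KPM side, of how the compatibility table above might be consumed; the function-pointer typedef and the caller are assumptions, while the exported sukisu_compact_find_symbol() and the "is_su_allow_uid" entry name come from the diff.
/* Hypothetical KPM-side lookup (assumed): resolve the allow-uid check
 * through the table; names not in the table fall back to kallsyms
 * inside sukisu_compact_find_symbol() itself. */
typedef int (*is_su_allow_uid_t)(uid_t uid);
static int example_query_allow(uid_t uid)
{
	is_su_allow_uid_t fn =
		(is_su_allow_uid_t)sukisu_compact_find_symbol("is_su_allow_uid");
	if (!fn)
		return 0;	/* symbol not available on this kernel */
	return fn(uid);		/* 1 if uid is on the allowlist, else 0 */
}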

View File

@@ -1,6 +1,6 @@
#ifndef __SUKISU_KPM_COMPACT_H #ifndef ___SUKISU_KPM_COMPACT_H
#define __SUKISU_KPM_COMPACT_H #define ___SUKISU_KPM_COMPACT_H
extern unsigned long sukisu_compact_find_symbol(const char *name); unsigned long sukisu_compact_find_symbol(const char* name);
#endif #endif

View File

@@ -8,11 +8,13 @@
 * Integrates ELF parsing, memory layout, symbol handling, and relocation (ARM64 relocation types supported),
 * and implements loading and control following KernelPatch's standard KPM format
*/ */
#include <linux/export.h>
#include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/kernfs.h> #include <linux/kernfs.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/elf.h> #include <linux/elf.h>
@@ -21,263 +23,162 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <asm/elf.h> #include <asm/elf.h> /* ARM64 relocation type definitions */
#include <linux/vmalloc.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h> #include <linux/set_memory.h>
#include <linux/version.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/insn.h> #include <asm/insn.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) && defined(CONFIG_MODULES) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) && defined(CONFIG_MODULES)
#include <linux/moduleloader.h> #include <linux/moduleloader.h> // requires CONFIG_MODULES
#endif #endif
#include "kpm.h" #include "kpm.h"
#include "compact.h" #include "compact.h"
#include "../kernel_compat.h"
#define KPM_NAME_LEN 32
#define KPM_ARGS_LEN 1024
#ifndef NO_OPTIMIZE #ifndef NO_OPTIMIZE
#if defined(__GNUC__) && !defined(__clang__) #if defined(__GNUC__) && !defined(__clang__)
#define NO_OPTIMIZE __attribute__((optimize("O0"))) #define NO_OPTIMIZE __attribute__((optimize("O0")))
#elif defined(__clang__) #elif defined(__clang__)
#define NO_OPTIMIZE __attribute__((optnone)) #define NO_OPTIMIZE __attribute__((optnone))
#else #else
#define NO_OPTIMIZE #define NO_OPTIMIZE
#endif #endif
#endif #endif
noinline NO_OPTIMIZE void sukisu_kpm_load_module_path(const char *path, // ============================================================================================
const char *args, void *ptr, int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_load_module_path). "
"path=%s args=%s ptr=%p\n", path, args, ptr);
__asm__ volatile("nop"); noinline
NO_OPTIMIZE
void sukisu_kpm_load_module_path(const char* path, const char* args, void* ptr, void __user* result) {
// This is a KPM module stub.
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_load_module_path). path=%s args=%s ptr=%p\n", path, args, ptr);
__asm__ volatile("nop"); // nop to keep this stub from being optimized away
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
} }
noinline
NO_OPTIMIZE
void sukisu_kpm_unload_module(const char* name, void* ptr, void __user* result) {
// This is a KPM module stub.
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_unload_module). name=%s ptr=%p\n", name, ptr);
__asm__ volatile("nop"); // nop to keep this stub from being optimized away
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
noinline
NO_OPTIMIZE
void sukisu_kpm_num(void __user* result) {
// This is a KPM module stub.
int res = 0;
printk("KPM: Stub function called (sukisu_kpm_num).\n");
__asm__ volatile("nop"); // nop to keep this stub from being optimized away
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
noinline
NO_OPTIMIZE
void sukisu_kpm_info(const char* name, void __user* out, void __user* result) {
// This is a KPM module stub.
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_info). name=%s buffer=%p\n", name, out);
__asm__ volatile("nop"); // nop to keep this stub from being optimized away
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
noinline
NO_OPTIMIZE
void sukisu_kpm_list(void __user* out, unsigned int bufferSize, void __user* result) {
// This is a KPM module stub.
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_list). buffer=%p size=%d\n", out, bufferSize);
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
noinline
NO_OPTIMIZE
void sukisu_kpm_control(void __user* name, void __user* args, void __user* result) {
// This is a KPM module stub.
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_control). name=%p args=%p\n", name, args);
__asm__ volatile("nop"); // nop to keep this stub from being optimized away
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
noinline
NO_OPTIMIZE
void sukisu_kpm_version(void __user* out, unsigned int bufferSize, void __user* result) {
int res = -1;
printk("KPM: Stub function called (sukisu_kpm_version). buffer=%p size=%d\n", out, bufferSize);
if(copy_to_user(result, &res, sizeof(res)) < 1) printk("KPM: Copy to user failed.");
}
EXPORT_SYMBOL(sukisu_kpm_load_module_path); EXPORT_SYMBOL(sukisu_kpm_load_module_path);
noinline NO_OPTIMIZE void sukisu_kpm_unload_module(const char *name,
void *ptr, int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_unload_module). "
"name=%s ptr=%p\n", name, ptr);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_unload_module); EXPORT_SYMBOL(sukisu_kpm_unload_module);
noinline NO_OPTIMIZE void sukisu_kpm_num(int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_num).\n");
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_num); EXPORT_SYMBOL(sukisu_kpm_num);
noinline NO_OPTIMIZE void sukisu_kpm_info(const char *name, char *buf, int bufferSize,
int *size)
{
pr_info("kpm: Stub function called (sukisu_kpm_info). "
"name=%s buffer=%p\n", name, buf);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_info); EXPORT_SYMBOL(sukisu_kpm_info);
noinline NO_OPTIMIZE void sukisu_kpm_list(void *out, int bufferSize,
int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_list). "
"buffer=%p size=%d\n", out, bufferSize);
}
EXPORT_SYMBOL(sukisu_kpm_list); EXPORT_SYMBOL(sukisu_kpm_list);
EXPORT_SYMBOL(sukisu_kpm_version);
noinline NO_OPTIMIZE void sukisu_kpm_control(const char *name, const char *args, long arg_len,
int *result)
{
pr_info("kpm: Stub function called (sukisu_kpm_control). "
"name=%p args=%p arg_len=%ld\n", name, args, arg_len);
__asm__ volatile("nop");
}
EXPORT_SYMBOL(sukisu_kpm_control); EXPORT_SYMBOL(sukisu_kpm_control);
noinline NO_OPTIMIZE void sukisu_kpm_version(char *buf, int bufferSize) noinline
int sukisu_handle_kpm(unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
{ {
pr_info("kpm: Stub function called (sukisu_kpm_version). " if(arg2 == SUKISU_KPM_LOAD) {
"buffer=%p\n", buf); char kernel_load_path[256] = { 0 };
char kernel_args_buffer[256] = { 0 };
if(arg3 == 0) {
return -1;
}
strncpy_from_user((char*)&kernel_load_path, (const char __user *)arg3, 255);
if(arg4 != 0) {
strncpy_from_user((char*)&kernel_args_buffer, (const char __user *)arg4, 255);
}
sukisu_kpm_load_module_path((const char*)&kernel_load_path, (const char*) &kernel_args_buffer, NULL, (void __user*) arg5);
} else if(arg2 == SUKISU_KPM_UNLOAD) {
char kernel_name_buffer[256] = { 0 };
if(arg3 == 0) {
return -1;
}
strncpy_from_user((char*)&kernel_name_buffer, (const char __user *)arg3, 255);
sukisu_kpm_unload_module((const char*) &kernel_name_buffer, NULL, (void __user*) arg5);
} else if(arg2 == SUKISU_KPM_NUM) {
sukisu_kpm_num((void __user*) arg5);
} else if(arg2 == SUKISU_KPM_INFO) {
char kernel_name_buffer[256] = { 0 };
if(arg3 == 0 || arg4 == 0) {
return -1;
}
strncpy_from_user((char*)&kernel_name_buffer, (const char __user *)arg3, 255);
sukisu_kpm_info((const char*) &kernel_name_buffer, (char __user*) arg4, (void __user*) arg5);
} else if(arg2 == SUKISU_KPM_LIST) {
sukisu_kpm_list((char __user*) arg3, (unsigned int) arg4, (void __user*) arg5);
} else if(arg2 == SUKISU_KPM_VERSION) {
sukisu_kpm_version((char __user*) arg3, (unsigned int) arg4, (void __user*) arg5);
} else if(arg2 == SUKISU_KPM_CONTROL) {
sukisu_kpm_control((char __user*) arg3, (char __user*) arg4, (void __user*) arg5);
}
return 0;
} }
EXPORT_SYMBOL(sukisu_kpm_version);
noinline int sukisu_handle_kpm(unsigned long control_code, unsigned long arg1, unsigned long arg2, int sukisu_is_kpm_control_code(unsigned long arg2) {
unsigned long result_code) return (arg2 >= CMD_KPM_CONTROL && arg2 <= CMD_KPM_CONTROL_MAX) ? 1 : 0;
{
int res = -1;
if (control_code == SUKISU_KPM_LOAD) {
char kernel_load_path[256];
char kernel_args_buffer[256];
if (arg1 == 0) {
res = -EINVAL;
goto exit;
}
if (!ksu_access_ok(arg1, sizeof(kernel_load_path))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_load_path, (const char *)arg1, sizeof(kernel_load_path));
if (arg2 != 0) {
if (!ksu_access_ok(arg2, sizeof(kernel_args_buffer))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_args_buffer, (const char *)arg2, sizeof(kernel_args_buffer));
}
sukisu_kpm_load_module_path((const char *)&kernel_load_path,
(const char *)&kernel_args_buffer, NULL, &res);
} else if (control_code == SUKISU_KPM_UNLOAD) {
char kernel_name_buffer[256];
if (arg1 == 0) {
res = -EINVAL;
goto exit;
}
if (!ksu_access_ok(arg1, sizeof(kernel_name_buffer))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_name_buffer, (const char *)arg1, sizeof(kernel_name_buffer));
sukisu_kpm_unload_module((const char *)&kernel_name_buffer, NULL, &res);
} else if (control_code == SUKISU_KPM_NUM) {
sukisu_kpm_num(&res);
} else if (control_code == SUKISU_KPM_INFO) {
char kernel_name_buffer[256];
char buf[256];
int size;
if (arg1 == 0 || arg2 == 0) {
res = -EINVAL;
goto exit;
}
if (!ksu_access_ok(arg1, sizeof(kernel_name_buffer))) {
goto invalid_arg;
}
strncpy_from_user((char *)&kernel_name_buffer, (const char __user *)arg1, sizeof(kernel_name_buffer));
sukisu_kpm_info((const char *)&kernel_name_buffer, (char *)&buf, sizeof(buf), &size);
if (!ksu_access_ok(arg2, size)) {
goto invalid_arg;
}
res = copy_to_user(arg2, &buf, size);
} else if (control_code == SUKISU_KPM_LIST) {
char buf[1024];
int len = (int) arg2;
if (len <= 0) {
res = -EINVAL;
goto exit;
}
if (!ksu_access_ok(arg2, len)) {
goto invalid_arg;
}
sukisu_kpm_list((char *)&buf, sizeof(buf), &res);
if (res > len) {
res = -ENOBUFS;
goto exit;
}
if (copy_to_user(arg1, &buf, len) != 0)
pr_info("kpm: Copy to user failed.");
} else if (control_code == SUKISU_KPM_CONTROL) {
char kpm_name[KPM_NAME_LEN] = { 0 };
char kpm_args[KPM_ARGS_LEN] = { 0 };
if (!ksu_access_ok(arg1, sizeof(kpm_name))) {
goto invalid_arg;
}
if (!ksu_access_ok(arg2, sizeof(kpm_args))) {
goto invalid_arg;
}
long name_len = strncpy_from_user((char *)&kpm_name, (const char __user *)arg1, sizeof(kpm_name));
if (name_len <= 0) {
res = -EINVAL;
goto exit;
}
long arg_len = strncpy_from_user((char *)&kpm_args, (const char __user *)arg2, sizeof(kpm_args));
sukisu_kpm_control((const char *)&kpm_name, (const char *)&kpm_args, arg_len, &res);
} else if (control_code == SUKISU_KPM_VERSION) {
char buffer[256] = {0};
sukisu_kpm_version((char*) &buffer, sizeof(buffer));
unsigned int outlen = (unsigned int) arg2;
int len = strlen(buffer);
if (len >= outlen) len = outlen - 1;
res = copy_to_user(arg1, &buffer, len + 1);
}
exit:
if (copy_to_user(result_code, &res, sizeof(res)) != 0)
pr_info("kpm: Copy to user failed.");
return 0;
invalid_arg:
pr_err("kpm: invalid pointer detected! arg1: %px arg2: %px\n", (void *)arg1, (void *)arg2);
res = -EFAULT;
goto exit;
} }
EXPORT_SYMBOL(sukisu_handle_kpm); EXPORT_SYMBOL(sukisu_handle_kpm);
int sukisu_is_kpm_control_code(unsigned long control_code) {
return (control_code >= CMD_KPM_CONTROL &&
control_code <= CMD_KPM_CONTROL_MAX) ? 1 : 0;
}
int do_kpm(void __user *arg)
{
struct ksu_kpm_cmd cmd;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
pr_err("kpm: copy_from_user failed\n");
return -EFAULT;
}
if (!ksu_access_ok(cmd.control_code, sizeof(int))) {
pr_err("kpm: invalid control_code pointer %px\n", (void *)cmd.control_code);
return -EFAULT;
}
if (!ksu_access_ok(cmd.result_code, sizeof(int))) {
pr_err("kpm: invalid result_code pointer %px\n", (void *)cmd.result_code);
return -EFAULT;
}
return sukisu_handle_kpm(cmd.control_code, cmd.arg1, cmd.arg2, cmd.result_code);
}

View File

@@ -1,70 +1,44 @@
#ifndef __SUKISU_KPM_H #ifndef ___SUKISU_KPM_H
#define __SUKISU_KPM_H #define ___SUKISU_KPM_H
#include <linux/types.h> int sukisu_handle_kpm(unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5);
#include <linux/ioctl.h> int sukisu_is_kpm_control_code(unsigned long arg2);
struct ksu_kpm_cmd { // KPM control codes
__aligned_u64 __user control_code; #define CMD_KPM_CONTROL 28
__aligned_u64 __user arg1; #define CMD_KPM_CONTROL_MAX 35
__aligned_u64 __user arg2;
__aligned_u64 __user result_code;
};
int sukisu_handle_kpm(unsigned long control_code, unsigned long arg1, unsigned long arg2, unsigned long result_code); // control codes
int sukisu_is_kpm_control_code(unsigned long control_code);
int do_kpm(void __user *arg);
#define KSU_IOCTL_KPM _IOC(_IOC_READ|_IOC_WRITE, 'K', 200, 0) // prctl(xxx, 28, "PATH", "ARGS")
// success return 0, error return -N
#define SUKISU_KPM_LOAD 28
/* KPM Control Code */ // prctl(xxx, 29, "NAME")
#define CMD_KPM_CONTROL 1 // success return 0, error return -N
#define CMD_KPM_CONTROL_MAX 10 #define SUKISU_KPM_UNLOAD 29
/* Control Code */ // num = prctl(xxx, 30)
/* // error return -N
* prctl(xxx, 1, "PATH", "ARGS") // success return +num or 0
* success return 0, error return -N #define SUKISU_KPM_NUM 30
*/
#define SUKISU_KPM_LOAD 1
/* // prctl(xxx, 31, Buffer, BufferSize)
* prctl(xxx, 2, "NAME") // success return +out, error return -N
* success return 0, error return -N #define SUKISU_KPM_LIST 31
*/
#define SUKISU_KPM_UNLOAD 2
/* // prctl(xxx, 32, "NAME", Buffer[256])
* num = prctl(xxx, 3) // success return +out, error return -N
* error return -N #define SUKISU_KPM_INFO 32
* success return +num or 0
*/
#define SUKISU_KPM_NUM 3
/* // prctl(xxx, 33, "NAME", "ARGS")
* prctl(xxx, 4, Buffer, BufferSize) // success return KPM's result value
* success return +out, error return -N // error return -N
*/ #define SUKISU_KPM_CONTROL 33
#define SUKISU_KPM_LIST 4
/* // prctl(xxx, 34, buffer, bufferSize)
* prctl(xxx, 5, "NAME", Buffer[256]) // success return KPM's result value
* success return +out, error return -N // error return -N
*/ #define SUKISU_KPM_VERSION 34
#define SUKISU_KPM_INFO 5
/*
* prctl(xxx, 6, "NAME", "ARGS")
* success return KPM's result value
* error return -N
*/
#define SUKISU_KPM_CONTROL 6
/*
* prctl(xxx, 7, buffer, bufferSize)
* success return KPM's result value
* error return -N
*/
#define SUKISU_KPM_VERSION 7
#endif #endif
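A hedged userspace sketch of the prctl interface documented above. The first prctl argument is the KernelSU magic option, which this hunk does not show, so KSU_OPTION below is a placeholder assumption; the command number and the result-pointer convention follow the new sukisu_handle_kpm(), and the stubs in this hunk simply report 0 loaded modules.
/* Hypothetical userspace caller (assumed): query the KPM module count. */
#include <stdio.h>
#include <sys/prctl.h>
#define KSU_OPTION	0xdeadbeef	/* assumption, not from this diff */
#define SUKISU_KPM_NUM	30
int main(void)
{
	int result = -1;
	/* prctl(magic, cmd, arg3, arg4, &result): the kernel writes the
	 * command's status/count into *result via copy_to_user(). */
	prctl(KSU_OPTION, SUKISU_KPM_NUM, 0, 0, &result);
	printf("loaded KPM modules: %d\n", result);
	return 0;
}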

View File

@@ -13,7 +13,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <asm/elf.h> #include <asm/elf.h> /* ARM64 relocation type definitions */
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/string.h> #include <linux/string.h>
@@ -24,259 +24,278 @@
#include <linux/version.h> #include <linux/version.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/mount.h>
#include <linux/kprobes.h>
#include <linux/mm_types.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <../fs/mount.h>
#include "kpm.h" #include "kpm.h"
#include "compact.h" #include "compact.h"
#include <linux/types.h>
#include <linux/stddef.h>
// struct member metadata
struct DynamicStructMember { struct DynamicStructMember {
const char *name; const char* name;
size_t size; size_t size;
size_t offset; size_t offset;
}; };
// struct metadata (including total size)
struct DynamicStructInfo { struct DynamicStructInfo {
const char *name; const char* name;
size_t count; size_t count;
size_t total_size; size_t total_size;
struct DynamicStructMember *members; struct DynamicStructMember* members;
}; };
// macros to define struct metadata (use the struct name directly)
#define DYNAMIC_STRUCT_BEGIN(struct_name) \ #define DYNAMIC_STRUCT_BEGIN(struct_name) \
static struct DynamicStructMember struct_name##_members[] = { static struct DynamicStructMember struct_name##_members[] = {
#define DEFINE_MEMBER(struct_name, member) \ #define DEFINE_MEMBER(struct_name, member) \
{ \ { \
.name = #member, \ .name = #member, \
.size = sizeof(((struct struct_name *)0)->member), \ .size = sizeof(((struct struct_name*)0)->member), \
.offset = offsetof(struct struct_name, member) \ .offset = offsetof(struct struct_name, member) \
}, },
#define DYNAMIC_STRUCT_END(struct_name) \ #define DYNAMIC_STRUCT_END(struct_name) \
}; \ }; \
static struct DynamicStructInfo struct_name##_info = { \ static struct DynamicStructInfo struct_name##_info = { \
.name = #struct_name, \ .name = #struct_name, \
.count = sizeof(struct_name##_members) / sizeof(struct DynamicStructMember), \ .count = sizeof(struct_name##_members) / sizeof(struct DynamicStructMember), \
.total_size = sizeof(struct struct_name), \ .total_size = sizeof(struct struct_name), \
.members = struct_name##_members \ .members = struct_name##_members \
}; };
// ==================================================================================
#include <linux/version.h>
#define KERNEL_VERSION_6_1 KERNEL_VERSION(6, 1, 0)
#define KERNEL_VERSION_5_15 KERNEL_VERSION(5, 15, 0)
#define KERNEL_VERSION_6_12 KERNEL_VERSION(6, 12, 0)
#define KERNEL_VERSION_4_10 KERNEL_VERSION(4, 10, 0)
#include <../fs/mount.h>
#include <linux/mount.h>
// define the metadata tables
DYNAMIC_STRUCT_BEGIN(mount) DYNAMIC_STRUCT_BEGIN(mount)
DEFINE_MEMBER(mount, mnt_parent) DEFINE_MEMBER(mount, mnt_parent)
DEFINE_MEMBER(mount, mnt) DEFINE_MEMBER(mount, mnt)
DEFINE_MEMBER(mount, mnt_id) DEFINE_MEMBER(mount, mnt_id)
DEFINE_MEMBER(mount, mnt_group_id) DEFINE_MEMBER(mount, mnt_group_id)
DEFINE_MEMBER(mount, mnt_expiry_mark) DEFINE_MEMBER(mount, mnt_expiry_mark)
DEFINE_MEMBER(mount, mnt_master) DEFINE_MEMBER(mount, mnt_master)
DEFINE_MEMBER(mount, mnt_devname) DEFINE_MEMBER(mount, mnt_devname)
DYNAMIC_STRUCT_END(mount) DYNAMIC_STRUCT_END(mount)
DYNAMIC_STRUCT_BEGIN(vfsmount) DYNAMIC_STRUCT_BEGIN(vfsmount)
DEFINE_MEMBER(vfsmount, mnt_root) DEFINE_MEMBER(vfsmount, mnt_root)
DEFINE_MEMBER(vfsmount, mnt_sb) DEFINE_MEMBER(vfsmount, mnt_sb)
DEFINE_MEMBER(vfsmount, mnt_flags) DEFINE_MEMBER(vfsmount, mnt_flags)
DYNAMIC_STRUCT_END(vfsmount) DYNAMIC_STRUCT_END(vfsmount)
DYNAMIC_STRUCT_BEGIN(mnt_namespace) DYNAMIC_STRUCT_BEGIN(mnt_namespace)
DEFINE_MEMBER(mnt_namespace, ns) DEFINE_MEMBER(mnt_namespace, ns)
DEFINE_MEMBER(mnt_namespace, root) DEFINE_MEMBER(mnt_namespace, root)
DEFINE_MEMBER(mnt_namespace, seq) DEFINE_MEMBER(mnt_namespace, seq)
DEFINE_MEMBER(mnt_namespace, mounts) DEFINE_MEMBER(mnt_namespace, mounts)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION_5_15
DEFINE_MEMBER(mnt_namespace, count) DEFINE_MEMBER(mnt_namespace, count)
#endif #endif
DYNAMIC_STRUCT_END(mnt_namespace) DYNAMIC_STRUCT_END(mnt_namespace)
#include <linux/kprobes.h>
#ifdef CONFIG_KPROBES #ifdef CONFIG_KPROBES
DYNAMIC_STRUCT_BEGIN(kprobe) DYNAMIC_STRUCT_BEGIN(kprobe)
DEFINE_MEMBER(kprobe, addr) DEFINE_MEMBER(kprobe, addr)
DEFINE_MEMBER(kprobe, symbol_name) DEFINE_MEMBER(kprobe, symbol_name)
DEFINE_MEMBER(kprobe, offset) DEFINE_MEMBER(kprobe, offset)
DEFINE_MEMBER(kprobe, pre_handler) DEFINE_MEMBER(kprobe, pre_handler)
DEFINE_MEMBER(kprobe, post_handler) DEFINE_MEMBER(kprobe, post_handler)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION_5_15
DEFINE_MEMBER(kprobe, fault_handler) DEFINE_MEMBER(kprobe, fault_handler)
#endif #endif
DEFINE_MEMBER(kprobe, flags) DEFINE_MEMBER(kprobe, flags)
DYNAMIC_STRUCT_END(kprobe) DYNAMIC_STRUCT_END(kprobe)
#endif #endif
#include <linux/mm.h>
#include <linux/mm_types.h>
DYNAMIC_STRUCT_BEGIN(vm_area_struct) DYNAMIC_STRUCT_BEGIN(vm_area_struct)
DEFINE_MEMBER(vm_area_struct,vm_start) DEFINE_MEMBER(vm_area_struct,vm_start)
DEFINE_MEMBER(vm_area_struct,vm_end) DEFINE_MEMBER(vm_area_struct,vm_end)
DEFINE_MEMBER(vm_area_struct,vm_flags) DEFINE_MEMBER(vm_area_struct,vm_flags)
DEFINE_MEMBER(vm_area_struct,anon_vma) DEFINE_MEMBER(vm_area_struct,anon_vma)
DEFINE_MEMBER(vm_area_struct,vm_pgoff) DEFINE_MEMBER(vm_area_struct,vm_pgoff)
DEFINE_MEMBER(vm_area_struct,vm_file) DEFINE_MEMBER(vm_area_struct,vm_file)
DEFINE_MEMBER(vm_area_struct,vm_private_data) DEFINE_MEMBER(vm_area_struct,vm_private_data)
#ifdef CONFIG_ANON_VMA_NAME #ifdef CONFIG_ANON_VMA_NAME
DEFINE_MEMBER(vm_area_struct, anon_name) DEFINE_MEMBER(vm_area_struct, anon_name)
#endif #endif
DEFINE_MEMBER(vm_area_struct, vm_ops) DEFINE_MEMBER(vm_area_struct, vm_ops)
DYNAMIC_STRUCT_END(vm_area_struct) DYNAMIC_STRUCT_END(vm_area_struct)
DYNAMIC_STRUCT_BEGIN(vm_operations_struct) DYNAMIC_STRUCT_BEGIN(vm_operations_struct)
DEFINE_MEMBER(vm_operations_struct, open) DEFINE_MEMBER(vm_operations_struct, open)
DEFINE_MEMBER(vm_operations_struct, close) DEFINE_MEMBER(vm_operations_struct, close)
DEFINE_MEMBER(vm_operations_struct, name) DEFINE_MEMBER(vm_operations_struct, name)
DEFINE_MEMBER(vm_operations_struct, access) DEFINE_MEMBER(vm_operations_struct, access)
DYNAMIC_STRUCT_END(vm_operations_struct) DYNAMIC_STRUCT_END(vm_operations_struct)
#include <linux/netlink.h>
DYNAMIC_STRUCT_BEGIN(netlink_kernel_cfg) DYNAMIC_STRUCT_BEGIN(netlink_kernel_cfg)
DEFINE_MEMBER(netlink_kernel_cfg, groups) DEFINE_MEMBER(netlink_kernel_cfg, groups)
DEFINE_MEMBER(netlink_kernel_cfg, flags) DEFINE_MEMBER(netlink_kernel_cfg, flags)
DEFINE_MEMBER(netlink_kernel_cfg, input) DEFINE_MEMBER(netlink_kernel_cfg, input)
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION_6_12
DEFINE_MEMBER(netlink_kernel_cfg, cb_mutex) DEFINE_MEMBER(netlink_kernel_cfg, cb_mutex)
#endif #endif
DEFINE_MEMBER(netlink_kernel_cfg, bind) DEFINE_MEMBER(netlink_kernel_cfg, bind)
DEFINE_MEMBER(netlink_kernel_cfg, unbind) DEFINE_MEMBER(netlink_kernel_cfg, unbind)
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION_6_1
DEFINE_MEMBER(netlink_kernel_cfg, compare) DEFINE_MEMBER(netlink_kernel_cfg, compare)
#endif #endif
DYNAMIC_STRUCT_END(netlink_kernel_cfg) DYNAMIC_STRUCT_END(netlink_kernel_cfg)
#include <linux/sched.h>
DYNAMIC_STRUCT_BEGIN(task_struct) DYNAMIC_STRUCT_BEGIN(task_struct)
DEFINE_MEMBER(task_struct, pid) DEFINE_MEMBER(task_struct, pid)
DEFINE_MEMBER(task_struct, tgid) DEFINE_MEMBER(task_struct, tgid)
DEFINE_MEMBER(task_struct, cred) DEFINE_MEMBER(task_struct, cred)
DEFINE_MEMBER(task_struct, real_cred) DEFINE_MEMBER(task_struct, real_cred)
DEFINE_MEMBER(task_struct, comm) DEFINE_MEMBER(task_struct, comm)
DEFINE_MEMBER(task_struct, parent) DEFINE_MEMBER(task_struct, parent)
DEFINE_MEMBER(task_struct, group_leader) DEFINE_MEMBER(task_struct, group_leader)
DEFINE_MEMBER(task_struct, mm) DEFINE_MEMBER(task_struct, mm)
DEFINE_MEMBER(task_struct, active_mm) DEFINE_MEMBER(task_struct, active_mm)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
DEFINE_MEMBER(task_struct, pids[PIDTYPE_PID].pid) DEFINE_MEMBER(task_struct, pids[PIDTYPE_PID].pid)
#else #else
DEFINE_MEMBER(task_struct, thread_pid) DEFINE_MEMBER(task_struct, thread_pid)
#endif #endif
DEFINE_MEMBER(task_struct, files) DEFINE_MEMBER(task_struct, files)
DEFINE_MEMBER(task_struct, seccomp) DEFINE_MEMBER(task_struct, seccomp)
#ifdef CONFIG_THREAD_INFO_IN_TASK #ifdef CONFIG_THREAD_INFO_IN_TASK
DEFINE_MEMBER(task_struct, thread_info) DEFINE_MEMBER(task_struct, thread_info)
#endif #endif
#ifdef CONFIG_CGROUPS #ifdef CONFIG_CGROUPS
DEFINE_MEMBER(task_struct, cgroups) DEFINE_MEMBER(task_struct, cgroups)
#endif #endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) #if LINUX_VERSION_CODE > KERNEL_VERSION_4_10
#ifdef CONFIG_SECURITY #ifdef CONFIG_SECURITY
DEFINE_MEMBER(task_struct, security) DEFINE_MEMBER(task_struct, security)
#endif #endif
#endif #endif
DEFINE_MEMBER(task_struct, thread) DEFINE_MEMBER(task_struct, thread)
DYNAMIC_STRUCT_END(task_struct) DYNAMIC_STRUCT_END(task_struct)
// =====================================================================================================================
#define STRUCT_INFO(name) &(name##_info) #define STRUCT_INFO(name) &(name##_info)
static struct DynamicStructInfo *dynamic_struct_infos[] = { static
STRUCT_INFO(mount), struct DynamicStructInfo* dynamic_struct_infos[] = {
STRUCT_INFO(vfsmount), STRUCT_INFO(mount),
STRUCT_INFO(mnt_namespace), STRUCT_INFO(vfsmount),
#ifdef CONFIG_KPROBES STRUCT_INFO(mnt_namespace),
STRUCT_INFO(kprobe), #ifdef CONFIG_KPROBES
#endif STRUCT_INFO(kprobe),
STRUCT_INFO(vm_area_struct), #endif
STRUCT_INFO(vm_operations_struct), STRUCT_INFO(vm_area_struct),
STRUCT_INFO(netlink_kernel_cfg), STRUCT_INFO(vm_operations_struct),
STRUCT_INFO(task_struct) STRUCT_INFO(netlink_kernel_cfg),
STRUCT_INFO(task_struct)
}; };
/* // return 0 if successful
* return 0 if successful // return -1 if struct not defined
* return -1 if struct not defined int sukisu_super_find_struct(
*/ const char* struct_name,
int sukisu_super_find_struct(const char *struct_name, size_t *out_size, int *out_members) size_t* out_size,
{ int* out_members
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) { ) {
struct DynamicStructInfo *info = dynamic_struct_infos[i]; size_t i;
for(i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
if (strcmp(struct_name, info->name) == 0) { struct DynamicStructInfo* info = dynamic_struct_infos[i];
if (out_size) if(strcmp(struct_name, info->name) == 0) {
*out_size = info->total_size; if(out_size)
*out_size = info->total_size;
if (out_members) if(out_members)
*out_members = info->count; *out_members = info->count;
return 0;
return 0; }
} }
} return -1;
return -1;
} }
EXPORT_SYMBOL(sukisu_super_find_struct); EXPORT_SYMBOL(sukisu_super_find_struct);
/* // Dynamic access struct
* Dynamic access struct // return 0 if successful
* return 0 if successful // return -1 if struct not defined
* return -1 if struct not defined // return -2 if member not defined
* return -2 if member not defined int sukisu_super_access (
*/ const char* struct_name,
int sukisu_super_access(const char *struct_name, const char *member_name, size_t *out_offset, const char* member_name,
size_t *out_size) size_t* out_offset,
{ size_t* out_size
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) { ) {
struct DynamicStructInfo *info = dynamic_struct_infos[i]; size_t i;
for(i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
if (strcmp(struct_name, info->name) == 0) { struct DynamicStructInfo* info = dynamic_struct_infos[i];
for (size_t i1 = 0; i1 < info->count; i1++) { if(strcmp(struct_name, info->name) == 0) {
if (strcmp(info->members[i1].name, member_name) == 0) { size_t i1;
if (out_offset) for (i1 = 0; i1 < info->count; i1++) {
*out_offset = info->members[i1].offset; if (strcmp(info->members[i1].name, member_name) == 0) {
if(out_offset)
if (out_size) *out_offset = info->members[i1].offset;
*out_size = info->members[i1].size; if(out_size)
*out_size = info->members[i1].size;
return 0; return 0;
} }
} }
return -2;
return -2; }
} }
} return -1;
return -1;
} }
EXPORT_SYMBOL(sukisu_super_access); EXPORT_SYMBOL(sukisu_super_access);
// dynamic container_of macro
#define DYNAMIC_CONTAINER_OF(offset, member_ptr) ({ \ #define DYNAMIC_CONTAINER_OF(offset, member_ptr) ({ \
(offset != (size_t)-1) ? (void*)((char*)(member_ptr) - offset) : NULL; \ (offset != (size_t)-1) ? (void*)((char*)(member_ptr) - offset) : NULL; \
}) })
/* // Dynamic container_of
* Dynamic container_of // return 0 if success
* return 0 if success // return -1 if current struct not defined
* return -1 if current struct not defined // return -2 if target member not defined
* return -2 if target member not defined int sukisu_super_container_of(
*/ const char* struct_name,
int sukisu_super_container_of(const char *struct_name, const char *member_name, void *ptr, const char* member_name,
void **out_ptr) void* ptr,
{ void** out_ptr
if (ptr == NULL) ) {
return -3; if(ptr == NULL) {
return -3;
for (size_t i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) { }
struct DynamicStructInfo *info = dynamic_struct_infos[i]; size_t i;
for(i = 0; i < (sizeof(dynamic_struct_infos) / sizeof(dynamic_struct_infos[0])); i++) {
if (strcmp(struct_name, info->name) == 0) { struct DynamicStructInfo* info = dynamic_struct_infos[i];
for (size_t i1 = 0; i1 < info->count; i1++) { if(strcmp(struct_name, info->name) == 0) {
if (strcmp(info->members[i1].name, member_name) == 0) { size_t i1;
*out_ptr = (void *)DYNAMIC_CONTAINER_OF(info->members[i1].offset, ptr); for (i1 = 0; i1 < info->count; i1++) {
if (strcmp(info->members[i1].name, member_name) == 0) {
return 0; *out_ptr = (void*) DYNAMIC_CONTAINER_OF(info->members[i1].offset, ptr);
} return 0;
} }
}
return -2; return -2;
} }
} }
return -1;
return -1;
} }
EXPORT_SYMBOL(sukisu_super_container_of); EXPORT_SYMBOL(sukisu_super_container_of);
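A minimal caller-side sketch of this lookup API (hypothetical; the "vm_area_struct"/"vm_start" names are only an illustration and must actually be present in dynamic_struct_infos for the calls to succeed):

/* Hypothetical consumer of the dynamic struct API above; not part of the diff. */
static unsigned long demo_read_vma_start(const void *vma)
{
	size_t total_size, offset, member_size;
	int member_count;

	/* Is the struct described at all in this build? */
	if (sukisu_super_find_struct("vm_area_struct", &total_size, &member_count) != 0)
		return 0;

	/* Resolve one member by name, then read it through the reported offset. */
	if (sukisu_super_access("vm_area_struct", "vm_start", &offset, &member_size) != 0)
		return 0;

	return *(const unsigned long *)((const char *)vma + offset);
}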


@@ -6,10 +6,34 @@
#include "kpm.h" #include "kpm.h"
#include "compact.h" #include "compact.h"
extern int sukisu_super_find_struct(const char *struct_name, size_t *out_size, int *out_members); // return 0 if successful
extern int sukisu_super_access(const char *struct_name, const char *member_name, size_t *out_offset, // return -1 if struct not defined
size_t *out_size); int sukisu_super_find_struct(
extern int sukisu_super_container_of(const char *struct_name, const char *member_name, void *ptr, const char* struct_name,
void **out_ptr); size_t* out_size,
int* out_members
);
// Dynamic access struct
// return 0 if successful
// return -1 if struct not defined
// return -2 if member not defined
int sukisu_super_access (
const char* struct_name,
const char* member_name,
size_t* out_offset,
size_t* out_size
);
// Dynamic container_of
// return 0 if success
// return -1 if current struct not defined
// return -2 if target member not defined
int sukisu_super_container_of(
const char* struct_name,
const char* member_name,
void* ptr,
void** out_ptr
);
#endif #endif


@@ -3,28 +3,35 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/workqueue.h>
-#include <generated/utsrelease.h>
-#include <generated/compile.h>
-#include <linux/version.h> /* LINUX_VERSION_CODE, KERNEL_VERSION macros */
+#include "allowlist.h"
+#include "arch.h"
+#include "core_hook.h"
+#include "klog.h" // IWYU pragma: keep
+#include "ksu.h"
+#include "throne_tracker.h"
#ifdef CONFIG_KSU_SUSFS
#include <linux/susfs.h>
#endif
-#include "allowlist.h"
-#include "ksu.h"
-#include "feature.h"
-#include "klog.h" // IWYU pragma: keep
-#include "throne_tracker.h"
-#ifndef CONFIG_KSU_SUSFS
-#include "syscall_hook_manager.h"
-#endif
-#include "ksud.h"
-#include "supercalls.h"
-#include "sulog.h"
-#include "throne_comm.h"
-#include "dynamic_manager.h"
+#ifdef CONFIG_KSU_CMDLINE
+#include <linux/init.h>
+// use get_ksu_state()!
+unsigned int enable_kernelsu = 1; // enabled by default
+static int __init read_kernelsu_state(char *s)
+{
+	if (s)
+		enable_kernelsu = simple_strtoul(s, NULL, 0);
+	return 1;
+}
+__setup("kernelsu.enabled=", read_kernelsu_state);
+bool get_ksu_state(void) { return enable_kernelsu >= 1; }
+#else
+bool get_ksu_state(void) { return true; }
+#endif /* CONFIG_KSU_CMDLINE */
static struct workqueue_struct *ksu_workqueue;
@@ -33,44 +40,56 @@ bool ksu_queue_work(struct work_struct *work)
	return queue_work(ksu_workqueue, work);
}
-void sukisu_custom_config_init(void)
+extern int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr,
+					void *argv, void *envp, int *flags);
+extern int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
+				    void *argv, void *envp, int *flags);
+int ksu_handle_execveat(int *fd, struct filename **filename_ptr, void *argv,
+			void *envp, int *flags)
{
+	ksu_handle_execveat_ksud(fd, filename_ptr, argv, envp, flags);
+	return ksu_handle_execveat_sucompat(fd, filename_ptr, argv, envp,
+					    flags);
}
-void sukisu_custom_config_exit(void)
-{
-	ksu_uid_exit();
-	ksu_throne_comm_exit();
-	ksu_dynamic_manager_exit();
-#if __SULOG_GATE
-	ksu_sulog_exit();
-#endif
-}
+extern void ksu_sucompat_init(void);
+extern void ksu_sucompat_exit(void);
+extern void ksu_ksud_init(void);
+extern void ksu_ksud_exit(void);
+#ifdef CONFIG_KSU_TRACEPOINT_HOOK
+extern void ksu_trace_register();
+extern void ksu_trace_unregister();
+#endif
int __init kernelsu_init(void)
{
-	pr_info("Initialized on: %s (%s) with driver version: %u\n",
-		UTS_RELEASE, UTS_MACHINE, KSU_VERSION);
+	pr_info("kernelsu.enabled=%d\n",
		(int)get_ksu_state());
+#ifdef CONFIG_KSU_CMDLINE
+	if (!get_ksu_state()) {
+		pr_info_once("drivers is disabled.");
+		return 0;
+	}
+#endif
#ifdef CONFIG_KSU_DEBUG
	pr_alert("*************************************************************");
	pr_alert("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **");
	pr_alert("** **");
	pr_alert("** You are running KernelSU in DEBUG mode **");
	pr_alert("** **");
	pr_alert("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **");
	pr_alert("*************************************************************");
#endif
-	ksu_feature_init();
-	ksu_lsm_hook_init();
-	ksu_supercalls_init();
-	sukisu_custom_config_init();
-	ksu_syscall_hook_manager_init();
+#ifdef CONFIG_KSU_SUSFS
+	susfs_init();
+#endif
+	ksu_core_init();
	ksu_workqueue = alloc_ordered_workqueue("kernelsu_work_queue", 0);
@@ -78,12 +97,16 @@ int __init kernelsu_init(void)
	ksu_throne_tracker_init();
-#ifdef CONFIG_KSU_SUSFS
-	susfs_init();
+	ksu_sucompat_init();
+#ifdef CONFIG_KSU_KPROBES_HOOK
+	ksu_ksud_init();
+#else
+	pr_debug("init ksu driver\n");
#endif
-#if defined(CONFIG_KPROBES) && !defined(CONFIG_KSU_SUSFS)
-	ksu_ksud_init();
+#ifdef CONFIG_KSU_TRACEPOINT_HOOK
+	ksu_trace_register();
#endif
#ifdef MODULE
@@ -94,28 +117,30 @@ int __init kernelsu_init(void)
	return 0;
}
-extern void ksu_observer_exit(void);
void kernelsu_exit(void)
{
+#ifdef CONFIG_KSU_CMDLINE
+	if (!get_ksu_state()) {
+		return;
+	}
+#endif
	ksu_allowlist_exit();
-	ksu_observer_exit();
	ksu_throne_tracker_exit();
	destroy_workqueue(ksu_workqueue);
-#if defined(CONFIG_KPROBES) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
	ksu_ksud_exit();
#endif
-	ksu_syscall_hook_manager_exit();
-	sukisu_custom_config_exit();
-	ksu_supercalls_exit();
-	ksu_feature_exit();
+#ifdef CONFIG_KSU_TRACEPOINT_HOOK
+	ksu_trace_unregister();
+#endif
+	ksu_sucompat_exit();
+	ksu_core_exit();
}
module_init(kernelsu_init);
@@ -125,10 +150,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("weishu");
MODULE_DESCRIPTION("Android KernelSU");
-#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
-MODULE_IMPORT_NS("VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver");
-#else
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
#endif
-#endif


@@ -7,12 +7,42 @@
#define KERNEL_SU_VERSION KSU_VERSION
#define KERNEL_SU_OPTION 0xDEADBEEF
-extern bool ksu_uid_scanner_enabled;
+#define CMD_GRANT_ROOT 0
+#define CMD_BECOME_MANAGER 1
+#define CMD_GET_VERSION 2
+#define CMD_ALLOW_SU 3
+#define CMD_DENY_SU 4
+#define CMD_GET_ALLOW_LIST 5
+#define CMD_GET_DENY_LIST 6
+#define CMD_REPORT_EVENT 7
+#define CMD_SET_SEPOLICY 8
+#define CMD_CHECK_SAFEMODE 9
+#define CMD_GET_APP_PROFILE 10
+#define CMD_SET_APP_PROFILE 11
+#define CMD_UID_GRANTED_ROOT 12
+#define CMD_UID_SHOULD_UMOUNT 13
+#define CMD_IS_SU_ENABLED 14
+#define CMD_ENABLE_SU 15
+#define CMD_SCAN_ALL_USERS 17
+#define CMD_GET_FULL_VERSION 0xC0FFEE1A
+#define CMD_ENABLE_KPM 100
+#define CMD_HOOK_TYPE 101
+#define CMD_GET_SUSFS_FEATURE_STATUS 102
+#define CMD_DYNAMIC_MANAGER 103
+#define CMD_GET_MANAGERS 104
#define EVENT_POST_FS_DATA 1
#define EVENT_BOOT_COMPLETED 2
#define EVENT_MODULE_MOUNTED 3
+#define KSU_APP_PROFILE_VER 2
+#define KSU_MAX_PACKAGE_NAME 256
+// NGROUPS_MAX for Linux is 65535 generally, but we only supports 32 groups.
+#define KSU_MAX_GROUPS 32
+#define KSU_SELINUX_DOMAIN 64
// SukiSU Ultra kernel su version full strings
#ifndef KSU_VERSION_FULL
#define KSU_VERSION_FULL "v3.x-00000000@unknown"
@@ -23,29 +53,118 @@ extern bool ksu_uid_scanner_enabled;
#define DYNAMIC_MANAGER_OP_GET 1
#define DYNAMIC_MANAGER_OP_CLEAR 2
-#define UID_SCANNER_OP_GET_STATUS 0
-#define UID_SCANNER_OP_TOGGLE 1
-#define UID_SCANNER_OP_CLEAR_ENV 2
struct dynamic_manager_user_config {
	unsigned int operation;
	unsigned int size;
	char hash[65];
};
struct manager_list_info {
	int count;
	struct {
		uid_t uid;
		int signature_index;
	} managers[2];
};
+// SUSFS Functional State Structures
+struct susfs_feature_status {
+	bool status_sus_path;
+	bool status_sus_mount;
+	bool status_auto_default_mount;
+	bool status_auto_bind_mount;
+	bool status_sus_kstat;
+	bool status_try_umount;
+	bool status_auto_try_umount_bind;
+	bool status_spoof_uname;
+	bool status_enable_log;
+	bool status_hide_symbols;
+	bool status_spoof_cmdline;
+	bool status_open_redirect;
+	bool status_magic_mount;
+	bool status_sus_su;
+};
+struct susfs_config_map {
+	bool *status_field;
+	bool is_enabled;
+};
+#define SUSFS_FEATURE_CHECK(config, field) \
+	do { \
+		status->field = IS_ENABLED(config); \
+	} while(0)
+static inline void init_susfs_feature_status(struct susfs_feature_status *status)
+{
+	memset(status, 0, sizeof(*status));
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SUS_PATH, status_sus_path);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SUS_MOUNT, status_sus_mount);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_KSU_DEFAULT_MOUNT, status_auto_default_mount);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_AUTO_ADD_SUS_BIND_MOUNT, status_auto_bind_mount);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SUS_KSTAT, status_sus_kstat);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_TRY_UMOUNT, status_try_umount);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_AUTO_ADD_TRY_UMOUNT_FOR_BIND_MOUNT, status_auto_try_umount_bind);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SPOOF_UNAME, status_spoof_uname);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_ENABLE_LOG, status_enable_log);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_HIDE_KSU_SUSFS_SYMBOLS, status_hide_symbols);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SPOOF_CMDLINE_OR_BOOTCONFIG, status_spoof_cmdline);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_OPEN_REDIRECT, status_open_redirect);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_HAS_MAGIC_MOUNT, status_magic_mount);
+	SUSFS_FEATURE_CHECK(CONFIG_KSU_SUSFS_SUS_SU, status_sus_su);
+}
+struct root_profile {
+	int32_t uid;
+	int32_t gid;
+	int32_t groups_count;
+	int32_t groups[KSU_MAX_GROUPS];
+	// kernel_cap_t is u32[2] for capabilities v3
+	struct {
+		u64 effective;
+		u64 permitted;
+		u64 inheritable;
+	} capabilities;
+	char selinux_domain[KSU_SELINUX_DOMAIN];
+	int32_t namespaces;
+};
+struct non_root_profile {
+	bool umount_modules;
+};
+struct app_profile {
+	// It may be utilized for backward compatibility, although we have never explicitly made any promises regarding this.
+	u32 version;
+	// this is usually the package of the app, but can be other value for special apps
+	char key[KSU_MAX_PACKAGE_NAME];
+	int32_t current_uid;
+	bool allow_su;
+	union {
+		struct {
+			bool use_default;
+			char template_name[KSU_MAX_PACKAGE_NAME];
+			struct root_profile profile;
+		} rp_config;
+		struct {
+			bool use_default;
+			struct non_root_profile profile;
+		} nrp_config;
+	};
+};
bool ksu_queue_work(struct work_struct *work);
-void ksu_lsm_hook_init(void);
+#if 0
static inline int startswith(char *s, char *prefix)
{
	return strncmp(s, prefix, strlen(prefix));
@@ -59,6 +178,5 @@ static inline int endswith(const char *s, const char *t)
		return 1;
	return strcmp(s + slen - tlen, t);
}
+#endif
#endif
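The CMD_* numbers above are the request codes userspace sends to the kernel; in KernelSU this is conventionally done through prctl() with KERNEL_SU_OPTION as the first argument, and the hooked handler writes KERNEL_SU_OPTION back through the result pointer on success. A hedged userspace sketch (CMD_GET_VERSION only; not part of this compare):

/* Hedged sketch of the userspace side of the prctl command interface. */
#include <stdio.h>
#include <sys/prctl.h>

#define KERNEL_SU_OPTION 0xDEADBEEF
#define CMD_GET_VERSION 2

int main(void)
{
	int version = 0;
	int result = 0;

	/* The kernel hook fills *arg3 with the version and *result with KERNEL_SU_OPTION. */
	prctl(KERNEL_SU_OPTION, CMD_GET_VERSION, &version, 0, &result);
	if ((unsigned int)result == KERNEL_SU_OPTION)
		printf("KernelSU kernel version: %d\n", version);
	else
		printf("KernelSU not present\n");
	return 0;
}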

kernel/ksu_trace.c (new file, 90 lines added)

@@ -0,0 +1,90 @@
#include "ksu_trace.h"
// extern kernelsu functions
extern bool ksu_execveat_hook __read_mostly;
extern int ksu_handle_execveat(int *fd, struct filename **filename_ptr, void *argv, void *envp, int *flags);
extern int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr, void *argv, void *envp, int *flags);
extern int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode, int *flags);
extern bool ksu_vfs_read_hook __read_mostly;
extern int ksu_handle_sys_read(unsigned int fd, char __user **buf_ptr, size_t *count_ptr);
extern int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags);
extern bool ksu_input_hook __read_mostly;
extern int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code, int *value);
extern int ksu_handle_devpts(struct inode*);
// end kernelsu functions
// tracepoint callback functions
void ksu_trace_execveat_hook_callback(void *data, int *fd, struct filename **filename_ptr,
void *argv, void *envp, int *flags)
{
if (unlikely(ksu_execveat_hook))
ksu_handle_execveat(fd, filename_ptr, argv, envp, flags);
else
ksu_handle_execveat_sucompat(fd, filename_ptr, NULL, NULL, NULL);
}
void ksu_trace_execveat_sucompat_hook_callback(void *data, int *fd, struct filename **filename_ptr,
void *argv, void *envp, int *flags)
{
if (!ksu_execveat_hook)
ksu_handle_execveat_sucompat(fd, filename_ptr, argv, envp, flags);
}
void ksu_trace_faccessat_hook_callback(void *data, int *dfd, const char __user **filename_user,
int *mode, int *flags)
{
ksu_handle_faccessat(dfd, filename_user, mode, flags);
}
void ksu_trace_sys_read_hook_callback(void *data, unsigned int fd, char __user **buf_ptr,
size_t *count_ptr)
{
if (unlikely(ksu_vfs_read_hook))
ksu_handle_sys_read(fd, buf_ptr, count_ptr);
}
void ksu_trace_stat_hook_callback(void *data, int *dfd, const char __user **filename_user,
int *flags)
{
ksu_handle_stat(dfd, filename_user, flags);
}
void ksu_trace_input_hook_callback(void *data, unsigned int *type, unsigned int *code,
int *value)
{
if (unlikely(ksu_input_hook))
ksu_handle_input_handle_event(type, code, value);
}
void ksu_trace_devpts_hook_callback(void *data, struct inode *inode)
{
ksu_handle_devpts(inode);
}
// end tracepoint callback functions
// register tracepoint callback functions
void ksu_trace_register(void)
{
register_trace_ksu_trace_execveat_hook(ksu_trace_execveat_hook_callback, NULL);
register_trace_ksu_trace_execveat_sucompat_hook(ksu_trace_execveat_sucompat_hook_callback, NULL);
register_trace_ksu_trace_faccessat_hook(ksu_trace_faccessat_hook_callback, NULL);
register_trace_ksu_trace_sys_read_hook(ksu_trace_sys_read_hook_callback, NULL);
register_trace_ksu_trace_stat_hook(ksu_trace_stat_hook_callback, NULL);
register_trace_ksu_trace_input_hook(ksu_trace_input_hook_callback, NULL);
register_trace_ksu_trace_devpts_hook(ksu_trace_devpts_hook_callback, NULL);
}
// unregister tracepoint callback functions
void ksu_trace_unregister(void)
{
unregister_trace_ksu_trace_execveat_hook(ksu_trace_execveat_hook_callback, NULL);
unregister_trace_ksu_trace_execveat_sucompat_hook(ksu_trace_execveat_sucompat_hook_callback, NULL);
unregister_trace_ksu_trace_faccessat_hook(ksu_trace_faccessat_hook_callback, NULL);
unregister_trace_ksu_trace_sys_read_hook(ksu_trace_sys_read_hook_callback, NULL);
unregister_trace_ksu_trace_stat_hook(ksu_trace_stat_hook_callback, NULL);
unregister_trace_ksu_trace_input_hook(ksu_trace_input_hook_callback, NULL);
unregister_trace_ksu_trace_devpts_hook(ksu_trace_devpts_hook_callback, NULL);
}
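The DECLARE_TRACE() entries in ksu_trace.h generate matching trace_<name>() calls, and the callbacks above only fire once those calls are placed at the hooked kernel sites and ksu_trace_register() has run. A hedged sketch of what one such call site could look like (the surrounding function is illustrative, not part of this compare):

/* Hypothetical call-site sketch for the tracepoint hook path. */
#include "ksu_trace.h"

static long patched_do_faccessat(int dfd, const char __user *filename, int mode, int flags)
{
	/* Generated by DECLARE_TRACE(ksu_trace_faccessat_hook, ...); it is a
	 * no-op until ksu_trace_register() attaches ksu_trace_faccessat_hook_callback. */
	trace_ksu_trace_faccessat_hook(&dfd, &filename, &mode, &flags);

	/* ... original do_faccessat() body would continue here ... */
	return 0;
}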

kernel/ksu_trace.h (new file, 45 lines added)

@@ -0,0 +1,45 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ksu_trace
#if !defined(_KSU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _KSU_TRACE_H
#include <linux/fs.h>
#include <linux/tracepoint.h>
DECLARE_TRACE(ksu_trace_execveat_hook,
TP_PROTO(int *fd, struct filename **filename_ptr, void *argv, void *envp, int *flags),
TP_ARGS(fd, filename_ptr, argv, envp, flags));
DECLARE_TRACE(ksu_trace_execveat_sucompat_hook,
TP_PROTO(int *fd, struct filename **filename_ptr, void *argv, void *envp, int *flags),
TP_ARGS(fd, filename_ptr, argv, envp, flags));
DECLARE_TRACE(ksu_trace_faccessat_hook,
TP_PROTO(int *dfd, const char __user **filename_user, int *mode, int *flags),
TP_ARGS(dfd, filename_user, mode, flags));
DECLARE_TRACE(ksu_trace_sys_read_hook,
TP_PROTO(unsigned int fd, char __user **buf_ptr, size_t *count_ptr),
TP_ARGS(fd, buf_ptr, count_ptr));
DECLARE_TRACE(ksu_trace_stat_hook,
TP_PROTO(int *dfd, const char __user **filename_user, int *flags),
TP_ARGS(dfd, filename_user, flags));
DECLARE_TRACE(ksu_trace_input_hook,
TP_PROTO(unsigned int *type, unsigned int *code, int *value),
TP_ARGS(type, code, value));
DECLARE_TRACE(ksu_trace_devpts_hook,
TP_PROTO(struct inode *inode),
TP_ARGS(inode));
#endif /* _KSU_TRACE_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE ksu_trace
#include <trace/define_trace.h>

kernel/ksu_trace_export.c (new file, 10 lines added)

@@ -0,0 +1,10 @@
#define CREATE_TRACE_POINTS
#include "ksu_trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_execveat_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_execveat_sucompat_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_faccessat_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_sys_read_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_stat_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_input_hook);
EXPORT_TRACEPOINT_SYMBOL_GPL(ksu_trace_devpts_hook);


@@ -1,6 +1,3 @@
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/task_work.h>
#include <asm/current.h>
#include <linux/compat.h>
#include <linux/cred.h>
@@ -9,9 +6,6 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/version.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
-#include <linux/sched/task.h>
-#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
#include <linux/input-event-codes.h>
#else
@@ -24,40 +18,36 @@
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <linux/namei.h>
#include <linux/workqueue.h>
-#include "manager.h"
#include "allowlist.h"
#include "arch.h"
#include "klog.h" // IWYU pragma: keep
#include "ksud.h"
#include "kernel_compat.h"
#include "selinux/selinux.h"
-#include "throne_tracker.h"
-bool ksu_module_mounted __read_mostly = false;
-bool ksu_boot_completed __read_mostly = false;
+bool ksu_is_compat __read_mostly = false; // let it here
static const char KERNEL_SU_RC[] =
	"\n"
	"on post-fs-data\n"
	"    start logd\n"
	// We should wait for the post-fs-data finish
	"    exec u:r:su:s0 root -- " KSUD_PATH " post-fs-data\n"
	"\n"
	"on nonencrypted\n"
	"    exec u:r:su:s0 root -- " KSUD_PATH " services\n"
	"\n"
	"on property:vold.decrypt=trigger_restart_framework\n"
	"    exec u:r:su:s0 root -- " KSUD_PATH " services\n"
	"\n"
	"on property:sys.boot_completed=1\n"
	"    exec u:r:su:s0 root -- " KSUD_PATH " boot-completed\n"
	"\n"
	"\n";
@@ -66,7 +56,7 @@ static void stop_vfs_read_hook(void);
static void stop_execve_hook(void);
static void stop_input_hook(void);
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
static struct work_struct stop_vfs_read_work;
static struct work_struct stop_execve_hook_work;
static struct work_struct stop_input_hook_work;
@@ -76,7 +66,11 @@ bool ksu_execveat_hook __read_mostly = true;
bool ksu_input_hook __read_mostly = true;
#endif
-u32 ksu_file_sid;
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+bool susfs_is_sus_su_ready = false;
+#endif // #ifdef CONFIG_KSU_SUSFS_SUS_SU
+u32 ksu_devpts_sid;
// Detect whether it is on or not
static bool is_boot_phase = true;
@@ -85,61 +79,22 @@ void on_post_fs_data(void)
{
	static bool done = false;
	if (done) {
-		pr_info("on_post_fs_data already done\n");
+		pr_info("%s already done\n", __func__);
		return;
	}
	done = true;
-	pr_info("on_post_fs_data!\n");
+	pr_info("%s!\n", __func__);
	ksu_load_allow_list();
-	ksu_observer_init();
	// sanity check, this may influence the performance
	stop_input_hook();
-	// End of boot state
-	is_boot_phase = false;
-	ksu_file_sid = ksu_get_ksu_file_sid();
-	pr_info("ksu_file sid: %d\n", ksu_file_sid);
+	ksu_devpts_sid = ksu_get_devpts_sid();
+	pr_info("devpts sid: %d\n", ksu_devpts_sid);
+	// End of boot state
+	is_boot_phase = false;
}
-extern void ext4_unregister_sysfs(struct super_block *sb);
-int nuke_ext4_sysfs(const char* mnt)
-{
-#ifdef CONFIG_EXT4_FS
-	struct path path;
-	int err = kern_path(mnt, 0, &path);
-	if (err) {
-		pr_err("nuke path err: %d\n", err);
-		return err;
-	}
-	struct super_block *sb = path.dentry->d_inode->i_sb;
-	const char *name = sb->s_type->name;
-	if (strcmp(name, "ext4") != 0) {
-		pr_info("nuke but module aren't mounted\n");
-		path_put(&path);
-		return -EINVAL;
-	}
-	ext4_unregister_sysfs(sb);
-	path_put(&path);
-	return 0;
-#endif
-}
-void on_module_mounted(void){
-	pr_info("on_module_mounted!\n");
-	ksu_module_mounted = true;
-}
-void on_boot_completed(void){
-	ksu_boot_completed = true;
-	pr_info("on_boot_completed!\n");
-	track_throne(true);
-}
-#ifndef CONFIG_KSU_SUSFS
#define MAX_ARG_STRINGS 0x7FFFFFFF
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
@@ -152,7 +107,6 @@ struct user_arg_ptr {
#endif
	} ptr;
};
-#endif // #ifndef CONFIG_KSU_SUSFS
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
@@ -165,6 +119,7 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);
+		ksu_is_compat = true;
		return compat_ptr(compat);
	}
#endif
@@ -204,25 +159,18 @@ static int __maybe_unused count(struct user_arg_ptr argv, int max)
			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
-			cond_resched();
		}
	}
	return i;
}
-static void on_post_fs_data_cbfun(struct callback_head *cb)
-{
-	on_post_fs_data();
-}
-static struct callback_head on_post_fs_data_cb = { .func =
-	on_post_fs_data_cbfun };
// IMPORTANT NOTE: the call from execve_handler_pre WON'T provided correct value for envp and flags in GKI version
int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
			     struct user_arg_ptr *argv,
			     struct user_arg_ptr *envp, int *flags)
{
-#ifndef KSU_KPROBES_HOOK
+#ifndef CONFIG_KSU_KPROBES_HOOK
	if (!ksu_execveat_hook) {
		return 0;
	}
@@ -247,8 +195,8 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
	}
	if (unlikely(!memcmp(filename->name, system_bin_init,
			     sizeof(system_bin_init) - 1) &&
		     argv)) {
		// /system/bin/init executed
		int argc = count(*argv, MAX_ARG_STRINGS);
		pr_info("/system/bin/init argc: %d\n", argc);
@@ -256,20 +204,23 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
			const char __user *p = get_user_arg_ptr(*argv, 1);
			if (p && !IS_ERR(p)) {
				char first_arg[16];
-				ksu_strncpy_from_user_nofault(first_arg, p, sizeof(first_arg));
-				pr_info("/system/bin/init first arg: %s\n", first_arg);
+				ksu_strncpy_from_user_retry(
+					first_arg, p, sizeof(first_arg));
+				pr_info("/system/bin/init first arg: %s\n",
+					first_arg);
				if (!strcmp(first_arg, "second_stage")) {
					pr_info("/system/bin/init second_stage executed\n");
					apply_kernelsu_rules();
					init_second_stage_executed = true;
+					ksu_android_ns_fs_check();
				}
			} else {
				pr_err("/system/bin/init parse args err!\n");
			}
		}
	} else if (unlikely(!memcmp(filename->name, old_system_init,
				    sizeof(old_system_init) - 1) &&
			    argv)) {
		// /init executed
		int argc = count(*argv, MAX_ARG_STRINGS);
		pr_info("/init argc: %d\n", argc);
@@ -278,12 +229,14 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
			const char __user *p = get_user_arg_ptr(*argv, 1);
			if (p && !IS_ERR(p)) {
				char first_arg[16];
-				ksu_strncpy_from_user_nofault(first_arg, p, sizeof(first_arg));
+				ksu_strncpy_from_user_retry(
+					first_arg, p, sizeof(first_arg));
				pr_info("/init first arg: %s\n", first_arg);
				if (!strcmp(first_arg, "--second-stage")) {
					pr_info("/init second_stage executed\n");
					apply_kernelsu_rules();
					init_second_stage_executed = true;
+					ksu_android_ns_fs_check();
				}
			} else {
				pr_err("/init parse args err!\n");
@@ -294,13 +247,15 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
		if (envc > 0) {
			int n;
			for (n = 1; n <= envc; n++) {
-				const char __user *p = get_user_arg_ptr(*envp, n);
+				const char __user *p =
+					get_user_arg_ptr(*envp, n);
				if (!p || IS_ERR(p)) {
					continue;
				}
				char env[256];
				// Reading environment variable strings from user space
-				if (ksu_strncpy_from_user_nofault(env, p, sizeof(env)) < 0)
+				if (ksu_strncpy_from_user_retry(
					    env, p, sizeof(env)) < 0)
					continue;
				// Parsing environment variable names and values
				char *env_name = env;
@@ -311,12 +266,15 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
					*env_value = '\0';
					env_value++;
					// Check if the environment variable name and value are matching
-					if (!strcmp(env_name, "INIT_SECOND_STAGE") &&
-					    (!strcmp(env_value, "1") ||
-					     !strcmp(env_value, "true"))) {
+					if (!strcmp(env_name,
						    "INIT_SECOND_STAGE") &&
+					    (!strcmp(env_value, "1") ||
+					     !strcmp(env_value, "true"))) {
						pr_info("/init second_stage executed\n");
						apply_kernelsu_rules();
-						init_second_stage_executed = true;
+						init_second_stage_executed =
+							true;
+						ksu_android_ns_fs_check();
					}
				}
			}
@@ -324,18 +282,11 @@ int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
	}
	if (unlikely(first_app_process && !memcmp(filename->name, app_process,
						  sizeof(app_process) - 1))) {
		first_app_process = false;
		pr_info("exec app_process, /data prepared, second_stage: %d\n",
			init_second_stage_executed);
-		struct task_struct *init_task;
-		rcu_read_lock();
-		init_task = rcu_dereference(current->real_parent);
-		if (init_task) {
-			task_work_add(init_task, &on_post_fs_data_cb, TWA_RESUME);
-		}
-		rcu_read_unlock();
+		on_post_fs_data(); // we keep this for old ksud
		stop_execve_hook();
	}
@@ -348,12 +299,13 @@ static struct file_operations fops_proxy;
static ssize_t read_count_append = 0;
static ssize_t read_proxy(struct file *file, char __user *buf, size_t count,
			  loff_t *pos)
{
	bool first_read = file->f_pos == 0;
	ssize_t ret = orig_read(file, buf, count, pos);
	if (first_read) {
-		pr_info("read_proxy append %ld + %ld\n", ret, read_count_append);
+		pr_info("read_proxy append %ld + %ld\n", ret,
+			read_count_append);
		ret += read_count_append;
	}
	return ret;
@@ -364,16 +316,17 @@ static ssize_t read_iter_proxy(struct kiocb *iocb, struct iov_iter *to)
	bool first_read = iocb->ki_pos == 0;
	ssize_t ret = orig_read_iter(iocb, to);
	if (first_read) {
-		pr_info("read_iter_proxy append %ld + %ld\n", ret, read_count_append);
+		pr_info("read_iter_proxy append %ld + %ld\n", ret,
+			read_count_append);
		ret += read_count_append;
	}
	return ret;
}
-static int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
+int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
			size_t *count_ptr, loff_t **pos)
{
-#ifndef KSU_KPROBES_HOOK
+#ifndef CONFIG_KSU_KPROBES_HOOK
	if (!ksu_vfs_read_hook) {
		return 0;
	}
@@ -429,7 +382,7 @@ static int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
	size_t rc_count = strlen(KERNEL_SU_RC);
	pr_info("vfs_read: %s, comm: %s, count: %zu, rc_count: %zu\n", dpath,
		current->comm, count, rc_count);
	if (count < rc_count) {
		pr_err("count: %zu < rc_count: %zu\n", count, rc_count);
@@ -465,7 +418,7 @@ static int ksu_handle_vfs_read(struct file **file_ptr, char __user **buf_ptr,
}
int ksu_handle_sys_read(unsigned int fd, char __user **buf_ptr,
			size_t *count_ptr)
{
	struct file *file = fget(fd);
	if (!file) {
@@ -484,9 +437,9 @@ static bool is_volumedown_enough(unsigned int count)
}
int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code,
				  int *value)
{
-#ifndef KSU_KPROBES_HOOK
+#ifndef CONFIG_KSU_KPROBES_HOOK
	if (!ksu_input_hook) {
		return 0;
	}
@@ -494,7 +447,7 @@ int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code,
	if (*type == EV_KEY && *code == KEY_VOLUMEDOWN) {
		int val = *value;
		pr_info("KEY_VOLUMEDOWN val: %d\n", val);
		if (val && is_boot_phase) {
			// key pressed, count it
			volumedown_pressed_count += 1;
			if (is_volumedown_enough(volumedown_pressed_count)) {
@@ -528,7 +481,28 @@ bool ksu_is_safe_mode()
	return false;
}
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
+// https://elixir.bootlin.com/linux/v5.10.158/source/fs/exec.c#L1864
+static int execve_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+	int *fd = (int *)&PT_REGS_PARM1(regs);
+	struct filename **filename_ptr =
+		(struct filename **)&PT_REGS_PARM2(regs);
+	struct user_arg_ptr argv;
+#ifdef CONFIG_COMPAT
+	argv.is_compat = PT_REGS_PARM3(regs);
+	if (unlikely(argv.is_compat)) {
+		argv.ptr.compat = PT_REGS_CCALL_PARM4(regs);
+	} else {
+		argv.ptr.native = PT_REGS_CCALL_PARM4(regs);
+	}
+#else
+	argv.ptr.native = PT_REGS_PARM3(regs);
+#endif
+	return ksu_handle_execveat_ksud(fd, filename_ptr, &argv, NULL, NULL);
+}
static int sys_execve_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
@@ -549,7 +523,8 @@ static int sys_execve_handler_pre(struct kprobe *p, struct pt_regs *regs)
	filename_in.name = path;
	filename_p = &filename_in;
-	return ksu_handle_execveat_ksud(AT_FDCWD, &filename_p, &argv, NULL, NULL);
+	return ksu_handle_execveat_ksud(AT_FDCWD, &filename_p, &argv, NULL,
+					NULL);
}
static int sys_read_handler_pre(struct kprobe *p, struct pt_regs *regs)
@@ -563,7 +538,7 @@ static int sys_read_handler_pre(struct kprobe *p, struct pt_regs *regs)
}
static int input_handle_event_handler_pre(struct kprobe *p,
					  struct pt_regs *regs)
{
	unsigned int *type = (unsigned int *)&PT_REGS_PARM2(regs);
	unsigned int *code = (unsigned int *)&PT_REGS_PARM3(regs);
@@ -600,11 +575,56 @@ static void do_stop_input_hook(struct work_struct *work)
{
	unregister_kprobe(&input_event_kp);
}
+#else
+static int ksu_execve_ksud_common(const char __user *filename_user,
+				  struct user_arg_ptr *argv)
+{
+	struct filename filename_in, *filename_p;
+	char path[32];
+	long len;
+	// return early if disabled.
+	if (!ksu_execveat_hook) {
+		return 0;
+	}
+	if (!filename_user)
+		return 0;
+	len = ksu_strncpy_from_user_nofault(path, filename_user, 32);
+	if (len <= 0)
+		return 0;
+	path[sizeof(path) - 1] = '\0';
+	// this is because ksu_handle_execveat_ksud calls it filename->name
+	filename_in.name = path;
+	filename_p = &filename_in;
+	return ksu_handle_execveat_ksud(AT_FDCWD, &filename_p, argv, NULL, NULL);
+}
+int __maybe_unused ksu_handle_execve_ksud(const char __user *filename_user,
					  const char __user *const __user *__argv)
+{
+	struct user_arg_ptr argv = { .ptr.native = __argv };
+	return ksu_execve_ksud_common(filename_user, &argv);
+}
+#if defined(CONFIG_COMPAT) && defined(CONFIG_64BIT)
+int __maybe_unused ksu_handle_compat_execve_ksud(const char __user *filename_user,
						 const compat_uptr_t __user *__argv)
+{
+	struct user_arg_ptr argv = { .ptr.compat = __argv };
+	return ksu_execve_ksud_common(filename_user, &argv);
+}
+#endif /* COMPAT & 64BIT */
#endif
static void stop_vfs_read_hook(void)
{
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
	bool ret = schedule_work(&stop_vfs_read_work);
	pr_info("unregister vfs_read kprobe: %d!\n", ret);
#else
@@ -615,26 +635,31 @@ static void stop_vfs_read_hook(void)
static void stop_execve_hook(void)
{
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
	bool ret = schedule_work(&stop_execve_hook_work);
	pr_info("unregister execve kprobe: %d!\n", ret);
#else
	ksu_execveat_hook = false;
	pr_info("stop execve_hook\n");
#endif
+#ifdef CONFIG_KSU_SUSFS_SUS_SU
+	susfs_is_sus_su_ready = true;
+	pr_info("susfs: sus_su is ready\n");
+#endif
}
static void stop_input_hook(void)
{
+#ifdef CONFIG_KSU_KPROBES_HOOK
	static bool input_hook_stopped = false;
	if (input_hook_stopped) {
		return;
	}
	input_hook_stopped = true;
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
	bool ret = schedule_work(&stop_input_hook_work);
	pr_info("unregister input kprobe: %d!\n", ret);
#else
+	if (!ksu_input_hook) { return; }
	ksu_input_hook = false;
	pr_info("stop input_hook\n");
#endif
@@ -643,7 +668,7 @@ static void stop_input_hook(void)
// ksud: module support
void ksu_ksud_init(void)
{
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
	int ret;
	ret = register_kprobe(&execve_kp);
@@ -663,12 +688,12 @@ void ksu_ksud_init(void)
void ksu_ksud_exit(void)
{
-#if defined(KSU_KPROBES_HOOK) && !defined(CONFIG_KSU_SUSFS)
+#ifdef CONFIG_KSU_KPROBES_HOOK
	unregister_kprobe(&execve_kp);
	// this should be done before unregister vfs_read_kp
	// unregister_kprobe(&vfs_read_kp);
	unregister_kprobe(&input_event_kp);
#endif
	is_boot_phase = false;
	volumedown_pressed_count = 0;
}
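With CONFIG_KSU_KPROBES_HOOK disabled, the ksu_handle_execve_ksud()/ksu_handle_compat_execve_ksud() helpers added above are meant to be called from manually patched syscall entry points. A hedged sketch of such a patch (illustrative only; the actual integration point and patch used by a given kernel tree may differ):

/* Hedged sketch of the manual (non-kprobe) wiring in fs/exec.c-style code. */
extern int ksu_handle_execve_ksud(const char __user *filename_user,
				  const char __user *const __user *__argv);

SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	/* Hand the raw user pointers to KernelSU before the real execve runs. */
	ksu_handle_execve_ksud(filename, argv);
	return do_execve(getname(filename), argv, envp);
}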


@@ -5,38 +5,12 @@
#define KSUD_PATH "/data/adb/ksud"
-void ksu_ksud_init();
-void ksu_ksud_exit();
void on_post_fs_data(void);
-void on_module_mounted(void);
-void on_boot_completed(void);
bool ksu_is_safe_mode(void);
-int nuke_ext4_sysfs(const char* mnt);
-extern u32 ksu_file_sid;
-extern bool ksu_module_mounted;
-extern bool ksu_boot_completed;
+extern u32 ksu_devpts_sid;
+extern void escape_to_root(bool do_check_first);
-#ifdef CONFIG_KSU_SUSFS
-#define MAX_ARG_STRINGS 0x7FFFFFFF
-struct user_arg_ptr {
-#ifdef CONFIG_COMPAT
-	bool is_compat;
-#endif
-	union {
-		const char __user *const __user *native;
-#ifdef CONFIG_COMPAT
-		const compat_uptr_t __user *compat;
-#endif
-	} ptr;
-};
-int ksu_handle_execveat_ksud(int *fd, struct filename **filename_ptr,
-			     struct user_arg_ptr *argv,
-			     struct user_arg_ptr *envp, int *flags);
-#endif // #ifdef CONFIG_KSU_SUSFS
#endif


@@ -1,74 +0,0 @@
#include <linux/version.h>
#include <linux/security.h>
#include <linux/lsm_hooks.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/uidgid.h>
#include "kernel_compat.h"
#include "ksu.h"
#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0) && defined(CONFIG_KSU_MANUAL_SU)
#include "manual_su.h"
static int ksu_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
ksu_try_escalate_for_uid(task_uid(task).val);
return 0;
}
#endif
// kernel 4.4 and 4.9
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
defined(CONFIG_IS_HW_HISI) || \
defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
static int ksu_key_permission(key_ref_t key_ref, const struct cred *cred,
unsigned perm)
{
if (init_session_keyring != NULL) {
return 0;
}
if (strcmp(current->comm, "init")) {
// we are only interested in `init` process
return 0;
}
init_session_keyring = cred->session_keyring;
pr_info("kernel_compat: got init_session_keyring\n");
return 0;
}
#endif
static struct security_hook_list ksu_hooks[] = {
#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0) && defined(CONFIG_KSU_MANUAL_SU)
LSM_HOOK_INIT(task_alloc, ksu_task_alloc),
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) || \
defined(CONFIG_IS_HW_HISI) || defined(CONFIG_KSU_ALLOWLIST_WORKAROUND)
LSM_HOOK_INIT(key_permission, ksu_key_permission)
#endif
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0)
const struct lsm_id ksu_lsmid = {
.name = "ksu",
.id = 912,
};
#endif
void __init ksu_lsm_hook_init(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0)
// https://elixir.bootlin.com/linux/v6.8/source/include/linux/lsm_hooks.h#L120
security_add_hooks(ksu_hooks, ARRAY_SIZE(ksu_hooks), &ksu_lsmid);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
security_add_hooks(ksu_hooks, ARRAY_SIZE(ksu_hooks), "ksu");
#else
// https://elixir.bootlin.com/linux/v4.10.17/source/include/linux/lsm_hooks.h#L1892
security_add_hooks(ksu_hooks, ARRAY_SIZE(ksu_hooks));
#endif
}


@@ -18,43 +18,25 @@ static inline bool ksu_is_manager_uid_valid(void)
	return ksu_manager_uid != KSU_INVALID_UID;
}
-#ifndef CONFIG_KSU_SUSFS
static inline bool is_manager(void)
{
	return unlikely(ksu_is_any_manager(current_uid().val) ||
			(ksu_manager_uid != KSU_INVALID_UID && ksu_manager_uid == current_uid().val));
}
-#else
-static inline bool is_manager()
-{
-	return unlikely((ksu_manager_uid == current_uid().val % 100000) ||
-			(ksu_manager_uid != KSU_INVALID_UID && ksu_manager_uid == current_uid().val % 100000));
-}
-#endif
static inline uid_t ksu_get_manager_uid(void)
{
	return ksu_manager_uid;
}
-#ifndef CONFIG_KSU_SUSFS
static inline void ksu_set_manager_uid(uid_t uid)
{
	ksu_manager_uid = uid;
}
-#else
-static inline void ksu_set_manager_uid(uid_t uid)
-{
-	ksu_manager_uid = uid % 100000;
-}
-#endif
static inline void ksu_invalidate_manager_uid(void)
{
	ksu_manager_uid = KSU_INVALID_UID;
}
-int ksu_observer_init(void);
-void ksu_observer_exit(void);
#endif
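A short sketch of how is_manager() and the related helpers are typically consulted on a permission path (the function below is a hypothetical helper, not part of this compare):

/* Hedged sketch: gate a privileged command on root or the registered manager UID. */
static bool demo_allow_manager_command(void)
{
	if (current_uid().val == 0)
		return true;
	return is_manager();
}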


@@ -10,8 +10,8 @@
#define EXPECTED_HASH_5EC1CFF "7e0c6d7278a3bb8e364e0fcba95afaf3666cf5ff3c245a3b63c8833bd0445cc4"
// rsuntk/KernelSU
#define EXPECTED_SIZE_RSUNTK 0x396
#define EXPECTED_HASH_RSUNTK "f415f4ed9435427e1fdf7f1fccd4dbc07b3d6b8751e4dbcec6f19671f427870b"
// ShirkNeko/KernelSU
#define EXPECTED_SIZE_SHIRKNEKO 0x35c
@@ -25,9 +25,4 @@
#define EXPECTED_SIZE_OTHER 0x300
#define EXPECTED_HASH_OTHER "0000000000000000000000000000000000000000000000000000000000000000"
-typedef struct {
-	unsigned size;
-	const char *sha256;
-} apk_sign_key_t;
#endif /* MANAGER_SIGN_H */


@@ -1,358 +0,0 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/printk.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include "manual_su.h"
#include "ksu.h"
#include "allowlist.h"
#include "manager.h"
#include "allowlist.h"
#include "app_profile.h"
static bool current_verified = false;
static void ksu_cleanup_expired_tokens(void);
static bool is_current_verified(void);
static void add_pending_root(uid_t uid);
static struct pending_uid pending_uids[MAX_PENDING] = {0};
static int pending_cnt = 0;
static struct ksu_token_entry auth_tokens[MAX_TOKENS] = {0};
static int token_count = 0;
static DEFINE_SPINLOCK(token_lock);
static char* get_token_from_envp(void)
{
struct mm_struct *mm;
char *envp_start, *envp_end;
char *env_ptr, *token = NULL;
unsigned long env_len;
char *env_copy = NULL;
if (!current->mm)
return NULL;
mm = current->mm;
down_read(&mm->mmap_lock);
envp_start = (char *)mm->env_start;
envp_end = (char *)mm->env_end;
env_len = envp_end - envp_start;
if (env_len <= 0 || env_len > PAGE_SIZE * 32) {
up_read(&mm->mmap_lock);
return NULL;
}
env_copy = kzalloc(env_len + 1, GFP_KERNEL);
if (!env_copy) {
up_read(&mm->mmap_lock);
return NULL;
}
if (copy_from_user(env_copy, envp_start, env_len)) {
kfree(env_copy);
up_read(&mm->mmap_lock);
return NULL;
}
up_read(&mm->mmap_lock);
env_copy[env_len] = '\0';
env_ptr = env_copy;
while (env_ptr < env_copy + env_len) {
if (strncmp(env_ptr, KSU_TOKEN_ENV_NAME "=", strlen(KSU_TOKEN_ENV_NAME) + 1) == 0) {
char *token_start = env_ptr + strlen(KSU_TOKEN_ENV_NAME) + 1;
char *token_end = strchr(token_start, '\0');
if (token_end && (token_end - token_start) == KSU_TOKEN_LENGTH) {
token = kzalloc(KSU_TOKEN_LENGTH + 1, GFP_KERNEL);
if (token) {
memcpy(token, token_start, KSU_TOKEN_LENGTH);
token[KSU_TOKEN_LENGTH] = '\0';
pr_info("manual_su: found auth token in environment\n");
}
}
break;
}
env_ptr += strlen(env_ptr) + 1;
}
kfree(env_copy);
return token;
}
static char* ksu_generate_auth_token(void)
{
static char token_buffer[KSU_TOKEN_LENGTH + 1];
unsigned long flags;
int i;
ksu_cleanup_expired_tokens();
spin_lock_irqsave(&token_lock, flags);
if (token_count >= MAX_TOKENS) {
for (i = 0; i < MAX_TOKENS - 1; i++) {
auth_tokens[i] = auth_tokens[i + 1];
}
token_count = MAX_TOKENS - 1;
}
for (i = 0; i < KSU_TOKEN_LENGTH; i++) {
u8 rand_byte;
get_random_bytes(&rand_byte, 1);
int char_type = rand_byte % 3;
if (char_type == 0) {
token_buffer[i] = 'A' + (rand_byte % 26);
} else if (char_type == 1) {
token_buffer[i] = 'a' + (rand_byte % 26);
} else {
token_buffer[i] = '0' + (rand_byte % 10);
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(auth_tokens[token_count].token, token_buffer, KSU_TOKEN_LENGTH + 1);
#else
strlcpy(auth_tokens[token_count].token, token_buffer, KSU_TOKEN_LENGTH + 1);
#endif
auth_tokens[token_count].expire_time = jiffies + KSU_TOKEN_EXPIRE_TIME * HZ;
auth_tokens[token_count].used = false;
token_count++;
spin_unlock_irqrestore(&token_lock, flags);
pr_info("manual_su: generated new auth token (expires in %d seconds)\n", KSU_TOKEN_EXPIRE_TIME);
return token_buffer;
}
static bool ksu_verify_auth_token(const char *token)
{
unsigned long flags;
bool valid = false;
int i;
if (!token || strlen(token) != KSU_TOKEN_LENGTH) {
return false;
}
spin_lock_irqsave(&token_lock, flags);
for (i = 0; i < token_count; i++) {
if (!auth_tokens[i].used &&
time_before(jiffies, auth_tokens[i].expire_time) &&
strcmp(auth_tokens[i].token, token) == 0) {
auth_tokens[i].used = true;
valid = true;
pr_info("manual_su: auth token verified successfully\n");
break;
}
}
spin_unlock_irqrestore(&token_lock, flags);
if (!valid) {
pr_warn("manual_su: invalid or expired auth token\n");
}
return valid;
}
static void ksu_cleanup_expired_tokens(void)
{
unsigned long flags;
int i, j;
spin_lock_irqsave(&token_lock, flags);
for (i = 0; i < token_count; ) {
if (time_after(jiffies, auth_tokens[i].expire_time) || auth_tokens[i].used) {
for (j = i; j < token_count - 1; j++) {
auth_tokens[j] = auth_tokens[j + 1];
}
token_count--;
pr_debug("manual_su: cleaned up expired/used token\n");
} else {
i++;
}
}
spin_unlock_irqrestore(&token_lock, flags);
}
static int handle_token_generation(struct manual_su_request *request)
{
if (current_uid().val > 2000) {
pr_warn("manual_su: token generation denied for app UID %d\n", current_uid().val);
return -EPERM;
}
char *new_token = ksu_generate_auth_token();
if (!new_token) {
pr_err("manual_su: failed to generate token\n");
return -ENOMEM;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
strscpy(request->token_buffer, new_token, KSU_TOKEN_LENGTH + 1);
#else
strlcpy(request->token_buffer, new_token, KSU_TOKEN_LENGTH + 1);
#endif
pr_info("manual_su: auth token generated successfully\n");
return 0;
}
static int handle_escalation_request(struct manual_su_request *request)
{
uid_t target_uid = request->target_uid;
pid_t target_pid = request->target_pid;
struct task_struct *tsk;
rcu_read_lock();
tsk = pid_task(find_vpid(target_pid), PIDTYPE_PID);
if (!tsk || ksu_task_is_dead(tsk)) {
rcu_read_unlock();
pr_err("cmd_su: PID %d is invalid or dead\n", target_pid);
return -ESRCH;
}
rcu_read_unlock();
if (current_uid().val == 0 || is_manager() || ksu_is_allow_uid_for_current(current_uid().val))
goto allowed;
char *env_token = get_token_from_envp();
if (!env_token) {
pr_warn("manual_su: no auth token found in environment\n");
return -EACCES;
}
bool token_valid = ksu_verify_auth_token(env_token);
kfree(env_token);
if (!token_valid) {
pr_warn("manual_su: token verification failed\n");
return -EACCES;
}
allowed:
current_verified = true;
escape_to_root_for_cmd_su(target_uid, target_pid);
return 0;
}
static int handle_add_pending_request(struct manual_su_request *request)
{
uid_t target_uid = request->target_uid;
if (!is_current_verified()) {
pr_warn("manual_su: add_pending denied, not verified\n");
return -EPERM;
}
add_pending_root(target_uid);
current_verified = false;
pr_info("manual_su: pending root added for UID %d\n", target_uid);
return 0;
}
int ksu_handle_manual_su_request(int option, struct manual_su_request *request)
{
if (!request) {
pr_err("manual_su: invalid request pointer\n");
return -EINVAL;
}
switch (option) {
case MANUAL_SU_OP_GENERATE_TOKEN:
pr_info("manual_su: handling token generation request\n");
return handle_token_generation(request);
case MANUAL_SU_OP_ESCALATE:
pr_info("manual_su: handling escalation request for UID %d, PID %d\n",
request->target_uid, request->target_pid);
return handle_escalation_request(request);
case MANUAL_SU_OP_ADD_PENDING:
pr_info("manual_su: handling add pending request for UID %d\n", request->target_uid);
return handle_add_pending_request(request);
default:
pr_err("manual_su: unknown option %d\n", option);
return -EINVAL;
}
}
static bool is_current_verified(void)
{
return current_verified;
}
bool is_pending_root(uid_t uid)
{
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].use_count++;
pending_uids[i].remove_calls++;
return true;
}
}
return false;
}
void remove_pending_root(uid_t uid)
{
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].remove_calls++;
if (pending_uids[i].remove_calls >= REMOVE_DELAY_CALLS) {
pending_uids[i] = pending_uids[--pending_cnt];
pr_info("pending_root: removed UID %d after %d calls\n", uid, REMOVE_DELAY_CALLS);
ksu_temp_revoke_root_once(uid);
} else {
pr_info("pending_root: UID %d remove_call=%d (<%d)\n",
uid, pending_uids[i].remove_calls, REMOVE_DELAY_CALLS);
}
return;
}
}
}
static void add_pending_root(uid_t uid)
{
if (pending_cnt >= MAX_PENDING) {
pr_warn("pending_root: cache full\n");
return;
}
for (int i = 0; i < pending_cnt; i++) {
if (pending_uids[i].uid == uid) {
pending_uids[i].use_count = 0;
pending_uids[i].remove_calls = 0;
return;
}
}
pending_uids[pending_cnt++] = (struct pending_uid){uid, 0};
ksu_temp_grant_root_once(uid);
pr_info("pending_root: cached UID %d\n", uid);
}
void ksu_try_escalate_for_uid(uid_t uid)
{
if (!is_pending_root(uid))
return;
pr_info("pending_root: UID=%d temporarily allowed\n", uid);
remove_pending_root(uid);
}
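The removed manual_su path expected a caller to obtain a one-shot token first and then present it through the KSU_AUTH_TOKEN environment variable when asking for escalation. A hedged userspace sketch of that (now removed) flow; the transport that delivers manual_su_request to ksu_handle_manual_su_request() is not shown in this compare, so it is stubbed here:

/* Hedged sketch of the removed manual_su flow; ksu_manual_su_call() and the
 * helper path are hypothetical stand-ins. */
#include <stdio.h>
#include <unistd.h>

struct manual_su_request {
	unsigned int target_uid;
	int target_pid;
	char token_buffer[33]; /* KSU_TOKEN_LENGTH + 1 */
};

extern int ksu_manual_su_call(int option, struct manual_su_request *req); /* hypothetical transport */

int demo_escalate(unsigned int uid, int pid)
{
	struct manual_su_request req = { 0 };

	/* 1. MANUAL_SU_OP_GENERATE_TOKEN (0): kernel fills token_buffer. */
	if (ksu_manual_su_call(0, &req) != 0)
		return -1;

	/* 2. get_token_from_envp() scanned the exec-time environment block, so the
	 *    token has to be in envp when the escalating process is exec'd. */
	char env_token[64], uid_arg[16], pid_arg[16];
	snprintf(env_token, sizeof(env_token), "KSU_AUTH_TOKEN=%s", req.token_buffer);
	snprintf(uid_arg, sizeof(uid_arg), "%u", uid);
	snprintf(pid_arg, sizeof(pid_arg), "%d", pid);
	char *const envp[] = { env_token, NULL };
	char *const argv[] = { "/path/to/escalator", uid_arg, pid_arg, NULL }; /* hypothetical helper */

	/* 3. The helper then issues MANUAL_SU_OP_ESCALATE (1) for target_uid/pid. */
	execve(argv[0], argv, envp);
	return -1; /* only reached if execve() failed */
}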


@@ -1,49 +0,0 @@
#ifndef __KSU_MANUAL_SU_H
#define __KSU_MANUAL_SU_H
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)
#define mmap_lock mmap_sem
#endif
#define ksu_task_is_dead(t) ((t)->exit_state != 0)
#define MAX_PENDING 16
#define REMOVE_DELAY_CALLS 150
#define MAX_TOKENS 10
#define KSU_SU_VERIFIED_BIT (1UL << 0)
#define KSU_TOKEN_LENGTH 32
#define KSU_TOKEN_ENV_NAME "KSU_AUTH_TOKEN"
#define KSU_TOKEN_EXPIRE_TIME 150
#define MANUAL_SU_OP_GENERATE_TOKEN 0
#define MANUAL_SU_OP_ESCALATE 1
#define MANUAL_SU_OP_ADD_PENDING 2
struct pending_uid {
uid_t uid;
int use_count;
int remove_calls;
};
struct manual_su_request {
uid_t target_uid;
pid_t target_pid;
char token_buffer[KSU_TOKEN_LENGTH + 1];
};
struct ksu_token_entry {
char token[KSU_TOKEN_LENGTH + 1];
unsigned long expire_time;
bool used;
};
int ksu_handle_manual_su_request(int option, struct manual_su_request *request);
bool is_pending_root(uid_t uid);
void remove_pending_root(uid_t uid);
void ksu_try_escalate_for_uid(uid_t uid);
#endif


@@ -1,158 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/version.h>
#include "klog.h" // IWYU pragma: keep
#include "ksu.h"
#include "throne_tracker.h"
#include "throne_comm.h"
#define MASK_SYSTEM (FS_CREATE | FS_MOVE | FS_EVENT_ON_CHILD)
struct watch_dir {
const char *path;
u32 mask;
struct path kpath;
struct inode *inode;
struct fsnotify_mark *mark;
};
static struct fsnotify_group *g;
#include "pkg_observer_defs.h" // KSU_DECL_FSNOTIFY_OPS
static KSU_DECL_FSNOTIFY_OPS(ksu_handle_generic_event)
{
if (!file_name || (mask & FS_ISDIR))
return 0;
if (ksu_fname_len(file_name) == 13 &&
!memcmp(ksu_fname_arg(file_name), "packages.list", 13)) {
pr_info("packages.list detected: %d\n", mask);
if (ksu_uid_scanner_enabled) {
ksu_request_userspace_scan();
}
track_throne(false);
}
return 0;
}
static const struct fsnotify_ops ksu_ops = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
.handle_inode_event = ksu_handle_generic_event,
#else
.handle_event = ksu_handle_generic_event,
#endif
};
static void __maybe_unused m_free(struct fsnotify_mark *m)
{
if (m) {
kfree(m);
}
}
static int add_mark_on_inode(struct inode *inode, u32 mask,
struct fsnotify_mark **out)
{
struct fsnotify_mark *m;
int ret;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return -ENOMEM;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
fsnotify_init_mark(m, g);
m->mask = mask;
ret = fsnotify_add_inode_mark(m, inode, 0);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
fsnotify_init_mark(m, g);
m->mask = mask;
ret = fsnotify_add_mark(m, inode, NULL, 0);
#else
fsnotify_init_mark(m, m_free);
m->mask = mask;
ret = fsnotify_add_mark(m, g, inode, NULL, 0);
#endif
if (ret < 0) {
fsnotify_put_mark(m);
return ret;
}
*out = m;
return 0;
}
static int watch_one_dir(struct watch_dir *wd)
{
int ret = kern_path(wd->path, LOOKUP_FOLLOW, &wd->kpath);
if (ret) {
pr_info("path not ready: %s (%d)\n", wd->path, ret);
return ret;
}
wd->inode = d_inode(wd->kpath.dentry);
ihold(wd->inode);
ret = add_mark_on_inode(wd->inode, wd->mask, &wd->mark);
if (ret) {
pr_err("Add mark failed for %s (%d)\n", wd->path, ret);
path_put(&wd->kpath);
iput(wd->inode);
wd->inode = NULL;
return ret;
}
pr_info("watching %s\n", wd->path);
return 0;
}
static void unwatch_one_dir(struct watch_dir *wd)
{
if (wd->mark) {
fsnotify_destroy_mark(wd->mark, g);
fsnotify_put_mark(wd->mark);
wd->mark = NULL;
}
if (wd->inode) {
iput(wd->inode);
wd->inode = NULL;
}
if (wd->kpath.dentry) {
path_put(&wd->kpath);
memset(&wd->kpath, 0, sizeof(wd->kpath));
}
}
static struct watch_dir g_watch = {
.path = "/data/system",
.mask = MASK_SYSTEM
};
int ksu_observer_init(void)
{
int ret = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
g = fsnotify_alloc_group(&ksu_ops, 0);
#else
g = fsnotify_alloc_group(&ksu_ops);
#endif
if (IS_ERR(g))
return PTR_ERR(g);
ret = watch_one_dir(&g_watch);
pr_info("%s done.\n", __func__);
return 0;
}
void ksu_observer_exit(void)
{
unwatch_one_dir(&g_watch);
fsnotify_put_group(g);
pr_info("%s: done.\n", __func__);
}
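
ksu_observer_init() and ksu_observer_exit() above are plain setup/teardown entry points; this page does not show where they are called from. A minimal sketch, assuming they are wired into a module's own init/exit path (the wrapper names below are illustrative, not KernelSU's actual init code):

#include <linux/module.h>

int ksu_observer_init(void);
void ksu_observer_exit(void);

static int __init pkg_observer_demo_init(void)
{
	/* watch_one_dir() failure is treated as non-fatal above, so the
	 * return value is mostly informational. */
	return ksu_observer_init();
}

static void __exit pkg_observer_demo_exit(void)
{
	ksu_observer_exit();
}

module_init(pkg_observer_demo_init);
module_exit(pkg_observer_demo_exit);
MODULE_LICENSE("GPL");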

View File

@@ -1,48 +0,0 @@
// This header should not be used outside of pkg_observer.c!
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
typedef const struct qstr *ksu_fname_t;
#define ksu_fname_len(f) ((f)->len)
#define ksu_fname_arg(f) ((f)->name)
#else
typedef const unsigned char *ksu_fname_t;
#define ksu_fname_len(f) (strlen(f))
#define ksu_fname_arg(f) (f)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
#define KSU_DECL_FSNOTIFY_OPS(name) \
int name(struct fsnotify_mark *mark, u32 mask, \
struct inode *inode, struct inode *dir, \
const struct qstr *file_name, u32 cookie)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
#define KSU_DECL_FSNOTIFY_OPS(name) \
int name(struct fsnotify_group *group, \
struct inode *inode, u32 mask, const void *data, int data_type, \
ksu_fname_t file_name, u32 cookie, \
struct fsnotify_iter_info *iter_info)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
#define KSU_DECL_FSNOTIFY_OPS(name) \
int name(struct fsnotify_group *group, \
struct inode *inode, u32 mask, const void *data, int data_type, \
ksu_fname_t file_name, u32 cookie, \
struct fsnotify_iter_info *iter_info)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
#define KSU_DECL_FSNOTIFY_OPS(name) \
int name(struct fsnotify_group *group, \
struct inode *inode, struct fsnotify_mark *inode_mark, \
struct fsnotify_mark *vfsmount_mark, \
u32 mask, const void *data, int data_type, \
ksu_fname_t file_name, u32 cookie, \
struct fsnotify_iter_info *iter_info)
#else
#define KSU_DECL_FSNOTIFY_OPS(name) \
int name(struct fsnotify_group *group, \
struct inode *inode, \
struct fsnotify_mark *inode_mark, \
struct fsnotify_mark *vfsmount_mark, \
u32 mask, void *data, int data_type, \
ksu_fname_t file_name, u32 cookie)
#endif
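
Since the dispatch above is entirely macro-driven, it may help to see two concrete expansions of KSU_DECL_FSNOTIFY_OPS(ksu_handle_generic_event), taken directly from the branches above (v5.9+ versus pre-4.12):

#include <linux/fsnotify_backend.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
/* Expansion used as .handle_inode_event in pkg_observer.c: */
int ksu_handle_generic_event(struct fsnotify_mark *mark, u32 mask,
		struct inode *inode, struct inode *dir,
		const struct qstr *file_name, u32 cookie);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
/* Oldest expansion, used as .handle_event; file_name is a plain
 * unsigned char pointer and there is no fsnotify_iter_info argument: */
int ksu_handle_generic_event(struct fsnotify_group *group,
		struct inode *inode,
		struct fsnotify_mark *inode_mark,
		struct fsnotify_mark *vfsmount_mark,
		u32 mask, void *data, int data_type,
		const unsigned char *file_name, u32 cookie);
#endif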

View File

@@ -1,67 +0,0 @@
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#include <linux/fs.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include "klog.h" // IWYU pragma: keep
#include "seccomp_cache.h"
struct action_cache {
DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
#ifdef SECCOMP_ARCH_COMPAT
DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
#endif
};
struct seccomp_filter {
refcount_t refs;
refcount_t users;
bool log;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
bool wait_killable_recv;
#endif
struct action_cache cache;
struct seccomp_filter *prev;
struct bpf_prog *prog;
struct notification *notif;
struct mutex notify_lock;
wait_queue_head_t wqh;
};
void ksu_seccomp_clear_cache(struct seccomp_filter *filter, int nr)
{
if (!filter) {
return;
}
if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
clear_bit(nr, filter->cache.allow_native);
}
#ifdef SECCOMP_ARCH_COMPAT
if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
clear_bit(nr, filter->cache.allow_compat);
}
#endif
}
void ksu_seccomp_allow_cache(struct seccomp_filter *filter, int nr)
{
if (!filter) {
return;
}
if (nr >= 0 && nr < SECCOMP_ARCH_NATIVE_NR) {
set_bit(nr, filter->cache.allow_native);
}
#ifdef SECCOMP_ARCH_COMPAT
if (nr >= 0 && nr < SECCOMP_ARCH_COMPAT_NR) {
set_bit(nr, filter->cache.allow_compat);
}
#endif
}
#endif
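
The helpers above flip bits in seccomp's per-filter action cache, so the caller must hold the task's sighand lock while the filter is touched; that is how the setuid hook further down this page uses them. A condensed sketch of that caller side:

#include <linux/sched/signal.h>
#include <linux/seccomp.h>
#include <asm/unistd.h>
#include "seccomp_cache.h"

/* Mirror of the call pattern in the setuid hook below: whitelist
 * __NR_reboot in the current task's seccomp action cache. */
static void __maybe_unused allow_reboot_for_current(void)
{
	if (current->seccomp.mode != SECCOMP_MODE_FILTER ||
	    !current->seccomp.filter)
		return;

	spin_lock_irq(&current->sighand->siglock);
	ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
	spin_unlock_irq(&current->sighand->siglock);
}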

View File

@@ -1,12 +0,0 @@
#ifndef __KSU_H_SECCOMP_CACHE
#define __KSU_H_SECCOMP_CACHE
#include <linux/fs.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
extern void ksu_seccomp_clear_cache(struct seccomp_filter *filter, int nr);
extern void ksu_seccomp_allow_cache(struct seccomp_filter *filter, int nr);
#endif
#endif

View File

@@ -6,7 +6,7 @@
#include "selinux.h"
#include "sepolicy.h"
#include "ss/services.h"
-#include "linux/lsm_audit.h" // IWYU pragma: keep
+#include "linux/lsm_audit.h"
#include "xfrm.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
@@ -37,7 +37,6 @@ static struct policydb *get_policydb(void)
}
static DEFINE_MUTEX(ksu_rules);
void apply_kernelsu_rules(void)
{
struct policydb *db;
@@ -96,6 +95,7 @@ void apply_kernelsu_rules(void)
ksu_allow(db, "init", "adb_data_file", "file", ALL);
ksu_allow(db, "init", "adb_data_file", "dir", ALL); // #1289
ksu_allow(db, "init", KERNEL_SU_DOMAIN, ALL, ALL);
// we need to umount modules in zygote
ksu_allow(db, "zygote", "adb_data_file", "dir", "search");
@@ -139,14 +139,18 @@ void apply_kernelsu_rules(void)
ksu_allow(db, "system_server", KERNEL_SU_DOMAIN, "process", "getpgid");
ksu_allow(db, "system_server", KERNEL_SU_DOMAIN, "process", "sigkill");
+// https://android-review.googlesource.com/c/platform/system/logging/+/3725346
+ksu_dontaudit(db, "untrusted_app", KERNEL_SU_DOMAIN, "dir", "getattr");
#ifdef CONFIG_KSU_SUSFS
// Allow umount in zygote process without installing zygisk
-// ksu_allow(db, "zygote", "labeledfs", "filesystem", "unmount");
+ksu_allow(db, "zygote", "labeledfs", "filesystem", "unmount");
-susfs_set_priv_app_sid();
+susfs_set_kernel_sid();
susfs_set_init_sid();
susfs_set_ksu_sid();
susfs_set_zygote_sid();
#endif
mutex_unlock(&ksu_rules);
}
@@ -162,20 +166,43 @@ void apply_kernelsu_rules(void)
#define CMD_TYPE_CHANGE 8
#define CMD_GENFSCON 9
+// keep it!
+extern bool ksu_is_compat __read_mostly;
+// armv7l kernel compat
+#ifdef CONFIG_64BIT
+#define usize u64
+#else
+#define usize u32
+#endif
struct sepol_data {
u32 cmd;
u32 subcmd;
-u64 sepol1;
-u64 sepol2;
-u64 sepol3;
-u64 sepol4;
-u64 sepol5;
-u64 sepol6;
-u64 sepol7;
+usize field_sepol1;
+usize field_sepol2;
+usize field_sepol3;
+usize field_sepol4;
+usize field_sepol5;
+usize field_sepol6;
+usize field_sepol7;
};
+// ksud 32-bit on arm64 kernel
+struct __maybe_unused sepol_data_compat {
+u32 cmd;
+u32 subcmd;
+u32 field_sepol1;
+u32 field_sepol2;
+u32 field_sepol3;
+u32 field_sepol4;
+u32 field_sepol5;
+u32 field_sepol6;
+u32 field_sepol7;
+};
static int get_object(char *buf, char __user *user_object, size_t buf_sz,
char **object)
{
if (!user_object) {
*object = ALL;
@@ -183,23 +210,18 @@ static int get_object(char *buf, char __user *user_object, size_t buf_sz,
}
if (strncpy_from_user(buf, user_object, buf_sz) < 0) {
-return -EINVAL;
+return -1;
}
*object = buf;
return 0;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0) || \
-!defined(KSU_COMPAT_USE_SELINUX_STATE)
-extern int avc_ss_reset(u32 seqno);
-#else
-extern int avc_ss_reset(struct selinux_avc *avc, u32 seqno);
-#endif
// reset avc cache table, otherwise the new rules will not take effect if already denied
static void reset_avc_cache(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0) || \
!defined(KSU_COMPAT_USE_SELINUX_STATE)
avc_ss_reset(0);
selnl_notify_policyload(0);
@@ -218,61 +240,83 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
struct policydb *db; struct policydb *db;
if (!arg4) { if (!arg4) {
return -EINVAL; return -1;
} }
if (!getenforce()) { if (!getenforce()) {
pr_info("SELinux permissive or disabled when handle policy!\n"); pr_info("SELinux permissive or disabled when handle policy!\n");
} }
struct sepol_data data; u32 cmd, subcmd;
if (copy_from_user(&data, arg4, sizeof(struct sepol_data))) { char __user *sepol1, *sepol2, *sepol3, *sepol4, *sepol5, *sepol6, *sepol7;
pr_err("sepol: copy sepol_data failed.\n");
return -EINVAL;
}
u32 cmd = data.cmd; if (unlikely(ksu_is_compat)) {
u32 subcmd = data.subcmd; struct sepol_data_compat data_compat;
if (copy_from_user(&data_compat, arg4, sizeof(struct sepol_data_compat))) {
pr_err("sepol: copy sepol_data failed.\n");
return -1;
}
pr_info("sepol: running in compat mode!\n");
sepol1 = compat_ptr(data_compat.field_sepol1);
sepol2 = compat_ptr(data_compat.field_sepol2);
sepol3 = compat_ptr(data_compat.field_sepol3);
sepol4 = compat_ptr(data_compat.field_sepol4);
sepol5 = compat_ptr(data_compat.field_sepol5);
sepol6 = compat_ptr(data_compat.field_sepol6);
sepol7 = compat_ptr(data_compat.field_sepol7);
cmd = data_compat.cmd;
subcmd = data_compat.subcmd;
} else {
struct sepol_data data;
if (copy_from_user(&data, arg4, sizeof(struct sepol_data))) {
pr_err("sepol: copy sepol_data failed.\n");
return -1;
}
sepol1 = data.field_sepol1;
sepol2 = data.field_sepol2;
sepol3 = data.field_sepol3;
sepol4 = data.field_sepol4;
sepol5 = data.field_sepol5;
sepol6 = data.field_sepol6;
sepol7 = data.field_sepol7;
cmd = data.cmd;
subcmd = data.subcmd;
}
mutex_lock(&ksu_rules); mutex_lock(&ksu_rules);
db = get_policydb(); db = get_policydb();
int ret = -EINVAL; int ret = -1;
switch (cmd) { if (cmd == CMD_NORMAL_PERM) {
case CMD_NORMAL_PERM: {
char src_buf[MAX_SEPOL_LEN]; char src_buf[MAX_SEPOL_LEN];
char tgt_buf[MAX_SEPOL_LEN]; char tgt_buf[MAX_SEPOL_LEN];
char cls_buf[MAX_SEPOL_LEN]; char cls_buf[MAX_SEPOL_LEN];
char perm_buf[MAX_SEPOL_LEN]; char perm_buf[MAX_SEPOL_LEN];
char *s, *t, *c, *p; char *s, *t, *c, *p;
if (get_object(src_buf, (void __user *)data.sepol1, if (get_object(src_buf, sepol1, sizeof(src_buf), &s) < 0) {
sizeof(src_buf), &s) < 0) {
pr_err("sepol: copy src failed.\n"); pr_err("sepol: copy src failed.\n");
goto exit; goto exit;
} }
if (get_object(tgt_buf, (void __user *)data.sepol2, if (get_object(tgt_buf, sepol2, sizeof(tgt_buf), &t) < 0) {
sizeof(tgt_buf), &t) < 0) {
pr_err("sepol: copy tgt failed.\n"); pr_err("sepol: copy tgt failed.\n");
goto exit; goto exit;
} }
if (get_object(cls_buf, (void __user *)data.sepol3, if (get_object(cls_buf, sepol3, sizeof(cls_buf), &c) < 0) {
sizeof(cls_buf), &c) < 0) {
pr_err("sepol: copy cls failed.\n"); pr_err("sepol: copy cls failed.\n");
goto exit; goto exit;
} }
if (get_object(perm_buf, (void __user *)data.sepol4, if (get_object(perm_buf, sepol4, sizeof(perm_buf), &p) <
sizeof(perm_buf), &p) < 0) { 0) {
pr_err("sepol: copy perm failed.\n"); pr_err("sepol: copy perm failed.\n");
goto exit; goto exit;
} }
bool success = false; bool success = false;
if (subcmd == 1) { if (subcmd == 1) {
success = ksu_allow(db, s, t, c, p); success = ksu_allow(db, s, t, c, p);
} else if (subcmd == 2) { } else if (subcmd == 2) {
@@ -284,10 +328,9 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
} else { } else {
pr_err("sepol: unknown subcmd: %d\n", subcmd); pr_err("sepol: unknown subcmd: %d\n", subcmd);
} }
ret = success ? 0 : -EINVAL; ret = success ? 0 : -1;
break;
} } else if (cmd == CMD_XPERM) {
case CMD_XPERM: {
char src_buf[MAX_SEPOL_LEN]; char src_buf[MAX_SEPOL_LEN];
char tgt_buf[MAX_SEPOL_LEN]; char tgt_buf[MAX_SEPOL_LEN];
char cls_buf[MAX_SEPOL_LEN]; char cls_buf[MAX_SEPOL_LEN];
@@ -297,28 +340,25 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
char perm_set[MAX_SEPOL_LEN]; char perm_set[MAX_SEPOL_LEN];
char *s, *t, *c; char *s, *t, *c;
if (get_object(src_buf, (void __user *)data.sepol1, if (get_object(src_buf, sepol1, sizeof(src_buf), &s) < 0) {
sizeof(src_buf), &s) < 0) {
pr_err("sepol: copy src failed.\n"); pr_err("sepol: copy src failed.\n");
goto exit; goto exit;
} }
if (get_object(tgt_buf, (void __user *)data.sepol2, if (get_object(tgt_buf, sepol2, sizeof(tgt_buf), &t) < 0) {
sizeof(tgt_buf), &t) < 0) {
pr_err("sepol: copy tgt failed.\n"); pr_err("sepol: copy tgt failed.\n");
goto exit; goto exit;
} }
if (get_object(cls_buf, (void __user *)data.sepol3, if (get_object(cls_buf, sepol3, sizeof(cls_buf), &c) < 0) {
sizeof(cls_buf), &c) < 0) {
pr_err("sepol: copy cls failed.\n"); pr_err("sepol: copy cls failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(operation, (void __user *)data.sepol4, if (strncpy_from_user(operation, sepol4,
sizeof(operation)) < 0) { sizeof(operation)) < 0) {
pr_err("sepol: copy operation failed.\n"); pr_err("sepol: copy operation failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(perm_set, (void __user *)data.sepol5, if (strncpy_from_user(perm_set, sepol5, sizeof(perm_set)) <
sizeof(perm_set)) < 0) { 0) {
pr_err("sepol: copy perm_set failed.\n"); pr_err("sepol: copy perm_set failed.\n");
goto exit; goto exit;
} }
@@ -333,14 +373,11 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
} else { } else {
pr_err("sepol: unknown subcmd: %d\n", subcmd); pr_err("sepol: unknown subcmd: %d\n", subcmd);
} }
ret = success ? 0 : -EINVAL; ret = success ? 0 : -1;
break; } else if (cmd == CMD_TYPE_STATE) {
}
case CMD_TYPE_STATE: {
char src[MAX_SEPOL_LEN]; char src[MAX_SEPOL_LEN];
if (strncpy_from_user(src, (void __user *)data.sepol1, if (strncpy_from_user(src, sepol1, sizeof(src)) < 0) {
sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n"); pr_err("sepol: copy src failed.\n");
goto exit; goto exit;
} }
@@ -355,20 +392,16 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
} }
if (success) if (success)
ret = 0; ret = 0;
break;
} } else if (cmd == CMD_TYPE || cmd == CMD_TYPE_ATTR) {
case CMD_TYPE:
case CMD_TYPE_ATTR: {
char type[MAX_SEPOL_LEN]; char type[MAX_SEPOL_LEN];
char attr[MAX_SEPOL_LEN]; char attr[MAX_SEPOL_LEN];
if (strncpy_from_user(type, (void __user *)data.sepol1, if (strncpy_from_user(type, sepol1, sizeof(type)) < 0) {
sizeof(type)) < 0) {
pr_err("sepol: copy type failed.\n"); pr_err("sepol: copy type failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(attr, (void __user *)data.sepol2, if (strncpy_from_user(attr, sepol2, sizeof(attr)) < 0) {
sizeof(attr)) < 0) {
pr_err("sepol: copy attr failed.\n"); pr_err("sepol: copy attr failed.\n");
goto exit; goto exit;
} }
@@ -384,13 +417,11 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
goto exit; goto exit;
} }
ret = 0; ret = 0;
break;
} } else if (cmd == CMD_ATTR) {
case CMD_ATTR: {
char attr[MAX_SEPOL_LEN]; char attr[MAX_SEPOL_LEN];
if (strncpy_from_user(attr, (void __user *)data.sepol1, if (strncpy_from_user(attr, sepol1, sizeof(attr)) < 0) {
sizeof(attr)) < 0) {
pr_err("sepol: copy attr failed.\n"); pr_err("sepol: copy attr failed.\n");
goto exit; goto exit;
} }
@@ -399,42 +430,37 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
goto exit; goto exit;
} }
ret = 0; ret = 0;
break;
} } else if (cmd == CMD_TYPE_TRANSITION) {
case CMD_TYPE_TRANSITION: {
char src[MAX_SEPOL_LEN]; char src[MAX_SEPOL_LEN];
char tgt[MAX_SEPOL_LEN]; char tgt[MAX_SEPOL_LEN];
char cls[MAX_SEPOL_LEN]; char cls[MAX_SEPOL_LEN];
char default_type[MAX_SEPOL_LEN]; char default_type[MAX_SEPOL_LEN];
char object[MAX_SEPOL_LEN]; char object[MAX_SEPOL_LEN];
if (strncpy_from_user(src, (void __user *)data.sepol1, if (strncpy_from_user(src, sepol1, sizeof(src)) < 0) {
sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n"); pr_err("sepol: copy src failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(tgt, (void __user *)data.sepol2, if (strncpy_from_user(tgt, sepol2, sizeof(tgt)) < 0) {
sizeof(tgt)) < 0) {
pr_err("sepol: copy tgt failed.\n"); pr_err("sepol: copy tgt failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(cls, (void __user *)data.sepol3, if (strncpy_from_user(cls, sepol3, sizeof(cls)) < 0) {
sizeof(cls)) < 0) {
pr_err("sepol: copy cls failed.\n"); pr_err("sepol: copy cls failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(default_type, (void __user *)data.sepol4, if (strncpy_from_user(default_type, sepol4,
sizeof(default_type)) < 0) { sizeof(default_type)) < 0) {
pr_err("sepol: copy default_type failed.\n"); pr_err("sepol: copy default_type failed.\n");
goto exit; goto exit;
} }
char *real_object; char *real_object;
if ((void __user *)data.sepol5 == NULL) { if (sepol5 == NULL) {
real_object = NULL; real_object = NULL;
} else { } else {
if (strncpy_from_user(object, if (strncpy_from_user(object, sepol5,
(void __user *)data.sepol5, sizeof(object)) < 0) {
sizeof(object)) < 0) {
pr_err("sepol: copy object failed.\n"); pr_err("sepol: copy object failed.\n");
goto exit; goto exit;
} }
@@ -445,31 +471,27 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
default_type, real_object); default_type, real_object);
if (success) if (success)
ret = 0; ret = 0;
break;
} } else if (cmd == CMD_TYPE_CHANGE) {
case CMD_TYPE_CHANGE: {
char src[MAX_SEPOL_LEN]; char src[MAX_SEPOL_LEN];
char tgt[MAX_SEPOL_LEN]; char tgt[MAX_SEPOL_LEN];
char cls[MAX_SEPOL_LEN]; char cls[MAX_SEPOL_LEN];
char default_type[MAX_SEPOL_LEN]; char default_type[MAX_SEPOL_LEN];
if (strncpy_from_user(src, (void __user *)data.sepol1, if (strncpy_from_user(src, sepol1, sizeof(src)) < 0) {
sizeof(src)) < 0) {
pr_err("sepol: copy src failed.\n"); pr_err("sepol: copy src failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(tgt, (void __user *)data.sepol2, if (strncpy_from_user(tgt, sepol2, sizeof(tgt)) < 0) {
sizeof(tgt)) < 0) {
pr_err("sepol: copy tgt failed.\n"); pr_err("sepol: copy tgt failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(cls, (void __user *)data.sepol3, if (strncpy_from_user(cls, sepol3, sizeof(cls)) < 0) {
sizeof(cls)) < 0) {
pr_err("sepol: copy cls failed.\n"); pr_err("sepol: copy cls failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(default_type, (void __user *)data.sepol4, if (strncpy_from_user(default_type, sepol4,
sizeof(default_type)) < 0) { sizeof(default_type)) < 0) {
pr_err("sepol: copy default_type failed.\n"); pr_err("sepol: copy default_type failed.\n");
goto exit; goto exit;
} }
@@ -485,24 +507,20 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
} }
if (success) if (success)
ret = 0; ret = 0;
break; } else if (cmd == CMD_GENFSCON) {
}
case CMD_GENFSCON: {
char name[MAX_SEPOL_LEN]; char name[MAX_SEPOL_LEN];
char path[MAX_SEPOL_LEN]; char path[MAX_SEPOL_LEN];
char context[MAX_SEPOL_LEN]; char context[MAX_SEPOL_LEN];
if (strncpy_from_user(name, (void __user *)data.sepol1, if (strncpy_from_user(name, sepol1, sizeof(name)) < 0) {
sizeof(name)) < 0) {
pr_err("sepol: copy name failed.\n"); pr_err("sepol: copy name failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(path, (void __user *)data.sepol2, if (strncpy_from_user(path, sepol2, sizeof(path)) < 0) {
sizeof(path)) < 0) {
pr_err("sepol: copy path failed.\n"); pr_err("sepol: copy path failed.\n");
goto exit; goto exit;
} }
if (strncpy_from_user(context, (void __user *)data.sepol3, if (strncpy_from_user(context, sepol3, sizeof(context)) <
sizeof(context)) < 0) { 0) {
pr_err("sepol: copy context failed.\n"); pr_err("sepol: copy context failed.\n");
goto exit; goto exit;
} }
@@ -512,12 +530,8 @@ int handle_sepolicy(unsigned long arg3, void __user *arg4)
goto exit; goto exit;
} }
ret = 0; ret = 0;
break; } else {
}
default: {
pr_err("sepol: unknown cmd: %d\n", cmd); pr_err("sepol: unknown cmd: %d\n", cmd);
break;
}
} }
exit: exit:
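
The hunk above (old version on the left of each merged line, new on the right) restructures handle_sepolicy() so the userspace struct is read through a ksu_is_compat branch, letting a 32-bit ksud pass pointer fields to a 64-bit kernel. A minimal sketch of that pointer-widening pattern, with hypothetical struct and function names (only the compat_ptr() usage follows the diff):

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical reduced layouts: one pointer field instead of seven. */
struct demo_sepol32 {
	u32 cmd;
	u32 subcmd;
	u32 sepol1;	/* 32-bit userspace pointer */
};

struct demo_sepol64 {
	u32 cmd;
	u32 subcmd;
	u64 sepol1;	/* 64-bit userspace pointer */
};

/* Copy the request and widen the pointer field when the caller is a
 * 32-bit process running on a 64-bit kernel. */
static int __maybe_unused demo_read_sepol1(void __user *arg, bool is_compat,
					   char __user **out)
{
	if (is_compat) {
		struct demo_sepol32 d;

		if (copy_from_user(&d, arg, sizeof(d)))
			return -EFAULT;
		*out = compat_ptr(d.sepol1);
	} else {
		struct demo_sepol64 d;

		if (copy_from_user(&d, arg, sizeof(d)))
			return -EFAULT;
		*out = (char __user *)(unsigned long)d.sepol1;
	}
	return 0;
}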

View File

@@ -1,12 +1,19 @@
-#include "linux/cred.h"
-#include "linux/sched.h"
-#include "linux/security.h"
-#include "linux/version.h"
+#include <linux/version.h>
#include "selinux_defs.h"
#include "../klog.h" // IWYU pragma: keep
#define KERNEL_SU_DOMAIN "u:r:su:s0"
+#ifdef CONFIG_KSU_SUSFS
+#define KERNEL_INIT_DOMAIN "u:r:init:s0"
+#define KERNEL_ZYGOTE_DOMAIN "u:r:zygote:s0"
+#define KERNEL_KERNEL_DOMAIN "u:r:kernel:s0"
+u32 susfs_ksu_sid = 0;
+u32 susfs_init_sid = 0;
+u32 susfs_zygote_sid = 0;
+u32 susfs_kernel_sid = 0;
+#endif
static int transive_to_domain(const char *domain) static int transive_to_domain(const char *domain)
{ {
struct cred *cred; struct cred *cred;
@@ -27,19 +34,20 @@ static int transive_to_domain(const char *domain)
pr_info("security_secctx_to_secid %s -> sid: %d, error: %d\n", pr_info("security_secctx_to_secid %s -> sid: %d, error: %d\n",
domain, sid, error); domain, sid, error);
} }
if (!error) { if (!error) {
tsec->sid = sid; tsec->sid = sid;
tsec->create_sid = 0; tsec->create_sid = 0;
tsec->keycreate_sid = 0; tsec->keycreate_sid = 0;
tsec->sockcreate_sid = 0; tsec->sockcreate_sid = 0;
} }
return error; return error;
} }
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 19, 0) #if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 19, 0)
bool __maybe_unused bool __maybe_unused is_ksu_transition(const struct task_security_struct *old_tsec,
is_ksu_transition(const struct task_security_struct *old_tsec, const struct task_security_struct *new_tsec)
const struct task_security_struct *new_tsec)
{ {
static u32 ksu_sid; static u32 ksu_sid;
char *secdata; char *secdata;
@@ -47,8 +55,7 @@ is_ksu_transition(const struct task_security_struct *old_tsec,
bool allowed = false; bool allowed = false;
if (!ksu_sid) if (!ksu_sid)
security_secctx_to_secid(KERNEL_SU_DOMAIN, security_secctx_to_secid(KERNEL_SU_DOMAIN, strlen(KERNEL_SU_DOMAIN), &ksu_sid);
strlen(KERNEL_SU_DOMAIN), &ksu_sid);
if (security_secid_to_secctx(old_tsec->sid, &secdata, &seclen)) if (security_secid_to_secctx(old_tsec->sid, &secdata, &seclen))
return false; return false;
@@ -59,7 +66,6 @@ is_ksu_transition(const struct task_security_struct *old_tsec,
} }
#endif #endif
void setup_selinux(const char *domain) void setup_selinux(const char *domain)
{ {
if (transive_to_domain(domain)) { if (transive_to_domain(domain)) {
@@ -82,7 +88,7 @@ bool getenforce(void)
return __is_selinux_enforcing(); return __is_selinux_enforcing();
} }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) && \ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) && \
!defined(KSU_COMPAT_HAS_CURRENT_SID) !defined(KSU_COMPAT_HAS_CURRENT_SID)
/* /*
* get the subjective security ID of the current task * get the subjective security ID of the current task
@@ -95,106 +101,44 @@ static inline u32 current_sid(void)
} }
#endif #endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 14, 0)
struct lsm_context {
char *context;
u32 len;
};
static int __security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
return security_secid_to_secctx(secid, &cp->context, &cp->len);
}
static void __security_release_secctx(struct lsm_context *cp)
{
security_release_secctx(cp->context, cp->len);
}
#else
#define __security_secid_to_secctx security_secid_to_secctx
#define __security_release_secctx security_release_secctx
#endif
bool is_task_ksu_domain(const struct cred *cred)
{
struct lsm_context ctx;
bool result;
if (!cred) {
return false;
}
const struct task_security_struct *tsec = __selinux_cred(cred);
if (!tsec) {
return false;
}
int err = __security_secid_to_secctx(tsec->sid, &ctx);
if (err) {
return false;
}
result = strncmp(KERNEL_SU_DOMAIN, ctx.context, ctx.len) == 0;
__security_release_secctx(&ctx);
return result;
}
bool is_ksu_domain(void) bool is_ksu_domain(void)
{ {
current_sid(); char *domain;
return is_task_ksu_domain(current_cred()); u32 seclen;
}
bool is_context(const struct cred *cred, const char *context)
{
if (!cred) {
return false;
}
const struct task_security_struct *tsec = __selinux_cred(cred);
if (!tsec) {
return false;
}
struct lsm_context ctx;
bool result; bool result;
int err = __security_secid_to_secctx(tsec->sid, &ctx);
int err = security_secid_to_secctx(current_sid(), &domain, &seclen);
if (err) { if (err) {
return false; return false;
} }
result = strncmp(context, ctx.context, ctx.len) == 0;
__security_release_secctx(&ctx); result = strncmp(KERNEL_SU_DOMAIN, domain, seclen) == 0;
security_release_secctx(domain, seclen);
return result; return result;
} }
bool is_zygote(const struct cred *cred) bool is_zygote(void *sec)
{ {
return is_context(cred, "u:r:zygote:s0"); struct task_security_struct *tsec = (struct task_security_struct *)sec;
} if (!tsec) {
return false;
bool is_init(const struct cred *cred)
{
return is_context(cred, "u:r:init:s0");
}
#define KSU_FILE_DOMAIN "u:object_r:ksu_file:s0"
u32 ksu_get_ksu_file_sid(void)
{
u32 ksu_file_sid = 0;
int err = security_secctx_to_secid(
KSU_FILE_DOMAIN, strlen(KSU_FILE_DOMAIN), &ksu_file_sid);
if (err) {
pr_info("get ksufile sid err %d\n", err);
} }
return ksu_file_sid;
char *domain;
u32 seclen;
bool result;
int err = security_secid_to_secctx(tsec->sid, &domain, &seclen);
if (err) {
return false;
}
result = strncmp("u:r:zygote:s0", domain, seclen) == 0;
security_release_secctx(domain, seclen);
return result;
} }
#ifdef CONFIG_KSU_SUSFS #ifdef CONFIG_KSU_SUSFS
#define KERNEL_INIT_DOMAIN "u:r:init:s0"
#define KERNEL_ZYGOTE_DOMAIN "u:r:zygote:s0"
#define KERNEL_PRIV_APP_DOMAIN "u:r:priv_app:s0:c512,c768"
#ifndef KERNEL_SU_DOMAIN
#define KERNEL_SU_DOMAIN "u:r:su:s0"
#endif // #ifndef KERNEL_SU_DOMAIN
u32 susfs_ksu_sid = 0;
u32 susfs_init_sid = 0;
u32 susfs_zygote_sid = 0;
u32 susfs_priv_app_sid = 0;
static inline void susfs_set_sid(const char *secctx_name, u32 *out_sid) static inline void susfs_set_sid(const char *secctx_name, u32 *out_sid)
{ {
int err; int err;
@@ -270,8 +214,22 @@ bool susfs_is_current_init_domain(void) {
return unlikely(current_sid() == susfs_init_sid); return unlikely(current_sid() == susfs_init_sid);
} }
void susfs_set_priv_app_sid(void) void susfs_set_kernel_sid(void)
{ {
susfs_set_sid(KERNEL_PRIV_APP_DOMAIN, &susfs_priv_app_sid); susfs_set_sid(KERNEL_KERNEL_DOMAIN, &susfs_kernel_sid);
} }
#endif #endif
#define DEVPTS_DOMAIN "u:object_r:ksu_file:s0"
u32 ksu_get_devpts_sid(void)
{
u32 devpts_sid = 0;
int err = security_secctx_to_secid(DEVPTS_DOMAIN, strlen(DEVPTS_DOMAIN),
&devpts_sid);
if (err)
pr_info("get devpts sid err %d\n", err);
return devpts_sid;
}
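
One detail that is easy to lose in the merged columns above: the removed side carried a small shim so the same call sites work before and after the 6.14 switch of security_secid_to_secctx()/security_release_secctx() to the struct lsm_context convention. Restated on its own for readability (taken from the left-hand, removed column):

#include <linux/security.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 14, 0)
/* Pre-6.14 kernels take (secid, char **ctx, u32 *len); wrap them so callers
 * can use the newer struct lsm_context convention uniformly. */
struct lsm_context {
	char *context;
	u32 len;
};

static int __security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
	return security_secid_to_secctx(secid, &cp->context, &cp->len);
}

static void __security_release_secctx(struct lsm_context *cp)
{
	security_release_secctx(cp->context, cp->len);
}
#else
#define __security_secid_to_secctx security_secid_to_secctx
#define __security_release_secctx security_release_secctx
#endif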

View File

@@ -1,12 +1,10 @@
#ifndef __KSU_H_SELINUX
#define __KSU_H_SELINUX
-#include "linux/types.h"
-#include "linux/version.h"
-#include "linux/cred.h"
+#include <linux/types.h>
+#include <linux/version.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
-defined(KSU_COMPAT_HAS_SELINUX_STATE)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || defined(KSU_COMPAT_HAS_SELINUX_STATE)
#define KSU_COMPAT_USE_SELINUX_STATE
#endif
@@ -16,21 +14,13 @@ void setenforce(bool);
bool getenforce(void);
-bool is_task_ksu_domain(const struct cred *cred);
bool is_ksu_domain(void);
-bool is_zygote(const struct cred *cred);
-bool is_init(const struct cred *cred);
+bool is_zygote(void *cred);
void apply_kernelsu_rules(void);
-u32 ksu_get_ksu_file_sid(void);
-int handle_sepolicy(unsigned long arg3, void __user *arg4);
-#ifdef CONFIG_KSU_SUSFS
+#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
bool susfs_is_sid_equal(void *sec, u32 sid2);
u32 susfs_get_sid_from_name(const char *secctx_name);
u32 susfs_get_current_sid(void);
@@ -40,7 +30,8 @@ void susfs_set_ksu_sid(void);
bool susfs_is_current_ksu_domain(void);
void susfs_set_init_sid(void);
bool susfs_is_current_init_domain(void);
-void susfs_set_priv_app_sid(void);
-#endif // #ifdef CONFIG_KSU_SUSFS
+#endif
+u32 ksu_get_devpts_sid(void);
#endif

View File

@@ -12,31 +12,25 @@
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
#ifdef KSU_COMPAT_USE_SELINUX_STATE
#define is_selinux_disabled() (selinux_state.disabled)
#else
#define is_selinux_disabled() (selinux_disabled)
#endif
#else
#define is_selinux_disabled() (0)
#endif
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
#ifdef KSU_COMPAT_USE_SELINUX_STATE
#define __is_selinux_enforcing() (selinux_state.enforcing)
#define __setenforce(val) selinux_state.enforcing = val
#elif defined(SAMSUNG_SELINUX_PORTING) || !defined(KSU_COMPAT_USE_SELINUX_STATE)
#define __is_selinux_enforcing() (selinux_enforcing)
#define __setenforce(val) selinux_enforcing = val
#endif
#else
#define __is_selinux_enforcing() (1)
#define __setenforce(val)
#endif
-#ifdef KSU_OPTIONAL_SELINUX_CRED
-#define __selinux_cred(cred) (selinux_cred(cred))
-#else
-#define __selinux_cred(cred) (cred->security)
-#endif
#endif

View File

@@ -19,16 +19,16 @@ static struct avtab_node *get_avtab_node(struct policydb *db,
struct avtab_extended_perms *xperms); struct avtab_extended_perms *xperms);
static bool add_rule(struct policydb *db, const char *s, const char *t, static bool add_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *p, int effect, bool invert); const char *c, const char *p, int effect, bool invert);
static void add_rule_raw(struct policydb *db, struct type_datum *src, static void add_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls, struct type_datum *tgt, struct class_datum *cls,
struct perm_datum *perm, int effect, bool invert); struct perm_datum *perm, int effect, bool invert);
static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src, static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls, struct type_datum *tgt, struct class_datum *cls,
uint16_t low, uint16_t high, int effect, uint16_t low, uint16_t high, int effect,
bool invert); bool invert);
static bool add_xperm_rule(struct policydb *db, const char *s, const char *t, static bool add_xperm_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *range, int effect, const char *c, const char *range, int effect,
bool invert); bool invert);
@@ -37,8 +37,8 @@ static bool add_type_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *d, int effect); const char *c, const char *d, int effect);
static bool add_filename_trans(struct policydb *db, const char *s, static bool add_filename_trans(struct policydb *db, const char *s,
const char *t, const char *c, const char *d, const char *t, const char *c, const char *d,
const char *o); const char *o);
static bool add_genfscon(struct policydb *db, const char *fs_name, static bool add_genfscon(struct policydb *db, const char *fs_name,
const char *path, const char *context); const char *path, const char *context);
@@ -52,7 +52,7 @@ static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
struct type_datum *attr); struct type_datum *attr);
static bool add_typeattribute(struct policydb *db, const char *type, static bool add_typeattribute(struct policydb *db, const char *type,
const char *attr); const char *attr);
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
// Implementation // Implementation
@@ -62,18 +62,18 @@ static bool add_typeattribute(struct policydb *db, const char *type,
// rules // rules
#define strip_av(effect, invert) ((effect == AVTAB_AUDITDENY) == !invert) #define strip_av(effect, invert) ((effect == AVTAB_AUDITDENY) == !invert)
#define ksu_hash_for_each(node_ptr, n_slot, cur) \ #define ksu_hash_for_each(node_ptr, n_slot, cur) \
int i; \ int i; \
for (i = 0; i < n_slot; ++i) \ for (i = 0; i < n_slot; ++i) \
for (cur = node_ptr[i]; cur; cur = cur->next) for (cur = node_ptr[i]; cur; cur = cur->next)
// htable is a struct instead of pointer above 5.8.0: // htable is a struct instead of pointer above 5.8.0:
// https://elixir.bootlin.com/linux/v5.8-rc1/source/security/selinux/ss/symtab.h // https://elixir.bootlin.com/linux/v5.8-rc1/source/security/selinux/ss/symtab.h
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
#define ksu_hashtab_for_each(htab, cur) \ #define ksu_hashtab_for_each(htab, cur) \
ksu_hash_for_each(htab.htable, htab.size, cur) ksu_hash_for_each(htab.htable, htab.size, cur)
#else #else
#define ksu_hashtab_for_each(htab, cur) \ #define ksu_hashtab_for_each(htab, cur) \
ksu_hash_for_each(htab->htable, htab->size, cur) ksu_hash_for_each(htab->htable, htab->size, cur)
#endif #endif
@@ -84,7 +84,7 @@ static bool add_typeattribute(struct policydb *db, const char *type,
#define symtab_insert(s, name, datum) hashtab_insert((s)->table, name, datum) #define symtab_insert(s, name, datum) hashtab_insert((s)->table, name, datum)
#endif #endif
#define avtab_for_each(avtab, cur) \ #define avtab_for_each(avtab, cur) \
ksu_hash_for_each(avtab.htable, avtab.nslot, cur); ksu_hash_for_each(avtab.htable, avtab.nslot, cur);
static struct avtab_node *get_avtab_node(struct policydb *db, static struct avtab_node *get_avtab_node(struct policydb *db,
@@ -99,8 +99,8 @@ static struct avtab_node *get_avtab_node(struct policydb *db,
node = avtab_search_node(&db->te_avtab, key); node = avtab_search_node(&db->te_avtab, key);
while (node) { while (node) {
if ((node->datum.u.xperms->specified == if ((node->datum.u.xperms->specified ==
xperms->specified) && xperms->specified) &&
(node->datum.u.xperms->driver == xperms->driver)) { (node->datum.u.xperms->driver == xperms->driver)) {
match = true; match = true;
break; break;
} }
@@ -115,9 +115,9 @@ static struct avtab_node *get_avtab_node(struct policydb *db,
if (!node) { if (!node) {
struct avtab_datum avdatum = {}; struct avtab_datum avdatum = {};
/* /*
* AUDITDENY, aka DONTAUDIT, are &= assigned, versus |= for * AUDITDENY, aka DONTAUDIT, are &= assigned, versus |= for
* others. Initialize the data accordingly. * others. Initialize the data accordingly.
*/ */
if (key->specified & AVTAB_XPERMS) { if (key->specified & AVTAB_XPERMS) {
avdatum.u.xperms = xperms; avdatum.u.xperms = xperms;
} else { } else {
@@ -133,7 +133,7 @@ static struct avtab_node *get_avtab_node(struct policydb *db,
grow_size += sizeof(u8); grow_size += sizeof(u8);
grow_size += sizeof(u8); grow_size += sizeof(u8);
grow_size += sizeof(u32) * grow_size += sizeof(u32) *
ARRAY_SIZE(avdatum.u.xperms->perms.p); ARRAY_SIZE(avdatum.u.xperms->perms.p);
} }
db->len += grow_size; db->len += grow_size;
} }
@@ -142,7 +142,7 @@ static struct avtab_node *get_avtab_node(struct policydb *db,
} }
static bool add_rule(struct policydb *db, const char *s, const char *t, static bool add_rule(struct policydb *db, const char *s, const char *t,
const char *c, const char *p, int effect, bool invert) const char *c, const char *p, int effect, bool invert)
{ {
struct type_datum *src = NULL, *tgt = NULL; struct type_datum *src = NULL, *tgt = NULL;
struct class_datum *cls = NULL; struct class_datum *cls = NULL;
@@ -202,8 +202,8 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
ksu_hashtab_for_each(db->p_types.table, node) ksu_hashtab_for_each(db->p_types.table, node)
{ {
add_rule_raw(db, add_rule_raw(db,
(struct type_datum *)node->datum, (struct type_datum *)node->datum,
tgt, cls, perm, effect, invert); tgt, cls, perm, effect, invert);
}; };
} else { } else {
ksu_hashtab_for_each(db->p_types.table, node) ksu_hashtab_for_each(db->p_types.table, node)
@@ -212,7 +212,7 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
(struct type_datum *)(node->datum); (struct type_datum *)(node->datum);
if (type->attribute) { if (type->attribute) {
add_rule_raw(db, type, tgt, cls, perm, add_rule_raw(db, type, tgt, cls, perm,
effect, invert); effect, invert);
} }
}; };
} }
@@ -222,8 +222,8 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
ksu_hashtab_for_each(db->p_types.table, node) ksu_hashtab_for_each(db->p_types.table, node)
{ {
add_rule_raw(db, src, add_rule_raw(db, src,
(struct type_datum *)node->datum, (struct type_datum *)node->datum,
cls, perm, effect, invert); cls, perm, effect, invert);
}; };
} else { } else {
ksu_hashtab_for_each(db->p_types.table, node) ksu_hashtab_for_each(db->p_types.table, node)
@@ -232,7 +232,7 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
(struct type_datum *)(node->datum); (struct type_datum *)(node->datum);
if (type->attribute) { if (type->attribute) {
add_rule_raw(db, src, type, cls, perm, add_rule_raw(db, src, type, cls, perm,
effect, invert); effect, invert);
} }
}; };
} }
@@ -241,8 +241,8 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
ksu_hashtab_for_each(db->p_classes.table, node) ksu_hashtab_for_each(db->p_classes.table, node)
{ {
add_rule_raw(db, src, tgt, add_rule_raw(db, src, tgt,
(struct class_datum *)node->datum, perm, (struct class_datum *)node->datum, perm,
effect, invert); effect, invert);
} }
} else { } else {
struct avtab_key key; struct avtab_key key;
@@ -275,9 +275,9 @@ static void add_rule_raw(struct policydb *db, struct type_datum *src,
#define xperm_clear(x, p) (p[x >> 5] &= ~(1 << (x & 0x1f))) #define xperm_clear(x, p) (p[x >> 5] &= ~(1 << (x & 0x1f)))
static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src, static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
struct type_datum *tgt, struct class_datum *cls, struct type_datum *tgt, struct class_datum *cls,
uint16_t low, uint16_t high, int effect, uint16_t low, uint16_t high, int effect,
bool invert) bool invert)
{ {
if (src == NULL) { if (src == NULL) {
struct hashtab_node *node; struct hashtab_node *node;
@@ -331,7 +331,7 @@ static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
int i; int i;
if (xperms.specified == AVTAB_XPERMS_IOCTLDRIVER) { if (xperms.specified == AVTAB_XPERMS_IOCTLDRIVER) {
for (i = ioctl_driver(low); i <= ioctl_driver(high); for (i = ioctl_driver(low); i <= ioctl_driver(high);
++i) { ++i) {
if (invert) if (invert)
xperm_clear(i, xperms.perms.p); xperm_clear(i, xperms.perms.p);
else else
@@ -355,7 +355,7 @@ static void add_xperm_rule_raw(struct policydb *db, struct type_datum *src,
if (datum->u.xperms == NULL) {
datum->u.xperms =
-(struct avtab_extended_perms *)(kzalloc(
+(struct avtab_extended_perms *)(kmalloc(
sizeof(xperms), GFP_KERNEL));
if (!datum->u.xperms) {
pr_err("alloc xperms failed\n");
@@ -497,8 +497,8 @@ static const struct hashtab_key_params filenametr_key_params = {
#endif #endif
static bool add_filename_trans(struct policydb *db, const char *s, static bool add_filename_trans(struct policydb *db, const char *s,
const char *t, const char *c, const char *d, const char *t, const char *c, const char *d,
const char *o) const char *o)
{ {
struct type_datum *src, *tgt, *def; struct type_datum *src, *tgt, *def;
struct class_datum *cls; struct class_datum *cls;
@@ -553,16 +553,16 @@ static bool add_filename_trans(struct policydb *db, const char *s,
if (trans == NULL) {
trans = (struct filename_trans_datum *)kcalloc(sizeof(*trans),
1, GFP_ATOMIC);
struct filename_trans_key *new_key =
-(struct filename_trans_key *)kzalloc(sizeof(*new_key),
+(struct filename_trans_key *)kmalloc(sizeof(*new_key),
GFP_ATOMIC);
*new_key = key;
new_key->name = kstrdup(key.name, GFP_ATOMIC);
trans->next = last;
trans->otype = def->value;
hashtab_insert(&db->filename_trans, new_key, trans,
filenametr_key_params);
}
db->compat_filename_trans_count++;
@@ -579,13 +579,13 @@ static bool add_filename_trans(struct policydb *db, const char *s,
if (trans == NULL) {
trans = (struct filename_trans_datum *)kcalloc(sizeof(*trans),
1, GFP_ATOMIC);
if (!trans) {
pr_err("add_filename_trans: Failed to alloc datum\n");
return false;
}
struct filename_trans *new_key =
-(struct filename_trans *)kzalloc(sizeof(*new_key),
+(struct filename_trans *)kmalloc(sizeof(*new_key),
GFP_ATOMIC);
if (!new_key) {
pr_err("add_filename_trans: Failed to alloc new_key\n");
@@ -598,7 +598,7 @@ static bool add_filename_trans(struct policydb *db, const char *s,
} }
return ebitmap_set_bit(&db->filename_trans_ttypes, src->value - 1, 1) == return ebitmap_set_bit(&db->filename_trans_ttypes, src->value - 1, 1) ==
0; 0;
#endif #endif
} }
@@ -635,7 +635,7 @@ static bool add_type(struct policydb *db, const char *type_name, bool attr)
u32 value = ++db->p_types.nprim; u32 value = ++db->p_types.nprim;
type = (struct type_datum *)kzalloc(sizeof(struct type_datum), type = (struct type_datum *)kzalloc(sizeof(struct type_datum),
GFP_ATOMIC); GFP_ATOMIC);
if (!type) { if (!type) {
pr_err("add_type: alloc type_datum failed.\n"); pr_err("add_type: alloc type_datum failed.\n");
return false; return false;
@@ -659,8 +659,8 @@ static bool add_type(struct policydb *db, const char *type_name, bool attr)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
struct ebitmap *new_type_attr_map_array = struct ebitmap *new_type_attr_map_array =
ksu_realloc(db->type_attr_map_array, ksu_realloc(db->type_attr_map_array,
value * sizeof(struct ebitmap), value * sizeof(struct ebitmap),
(value - 1) * sizeof(struct ebitmap)); (value - 1) * sizeof(struct ebitmap));
if (!new_type_attr_map_array) { if (!new_type_attr_map_array) {
pr_err("add_type: alloc type_attr_map_array failed\n"); pr_err("add_type: alloc type_attr_map_array failed\n");
@@ -669,8 +669,8 @@ static bool add_type(struct policydb *db, const char *type_name, bool attr)
struct type_datum **new_type_val_to_struct = struct type_datum **new_type_val_to_struct =
ksu_realloc(db->type_val_to_struct, ksu_realloc(db->type_val_to_struct,
sizeof(*db->type_val_to_struct) * value, sizeof(*db->type_val_to_struct) * value,
sizeof(*db->type_val_to_struct) * (value - 1)); sizeof(*db->type_val_to_struct) * (value - 1));
if (!new_type_val_to_struct) { if (!new_type_val_to_struct) {
pr_err("add_type: alloc type_val_to_struct failed\n"); pr_err("add_type: alloc type_val_to_struct failed\n");
@@ -679,8 +679,8 @@ static bool add_type(struct policydb *db, const char *type_name, bool attr)
char **new_val_to_name_types = char **new_val_to_name_types =
ksu_realloc(db->sym_val_to_name[SYM_TYPES], ksu_realloc(db->sym_val_to_name[SYM_TYPES],
sizeof(char *) * value, sizeof(char *) * value,
sizeof(char *) * (value - 1)); sizeof(char *) * (value - 1));
if (!new_val_to_name_types) { if (!new_val_to_name_types) {
pr_err("add_type: alloc val_to_name failed\n"); pr_err("add_type: alloc val_to_name failed\n");
return false; return false;
@@ -809,7 +809,7 @@ static bool add_type(struct policydb *db, const char *type_name, bool attr)
old_elem = flex_array_get(db->type_attr_map_array, j); old_elem = flex_array_get(db->type_attr_map_array, j);
if (old_elem) if (old_elem)
flex_array_put(new_type_attr_map_array, j, old_elem, flex_array_put(new_type_attr_map_array, j, old_elem,
GFP_ATOMIC | __GFP_ZERO); GFP_ATOMIC | __GFP_ZERO);
} }
for (j = 0; j < db->type_val_to_struct_array->total_nr_elements; j++) { for (j = 0; j < db->type_val_to_struct_array->total_nr_elements; j++) {
@@ -880,7 +880,7 @@ static bool set_type_state(struct policydb *db, const char *type_name,
{ {
type = (struct type_datum *)(node->datum); type = (struct type_datum *)(node->datum);
if (ebitmap_set_bit(&db->permissive_map, type->value, if (ebitmap_set_bit(&db->permissive_map, type->value,
permissive)) permissive))
pr_info("Could not set bit in permissive map\n"); pr_info("Could not set bit in permissive map\n");
}; };
} else { } else {
@@ -891,7 +891,7 @@ static bool set_type_state(struct policydb *db, const char *type_name,
return false; return false;
} }
if (ebitmap_set_bit(&db->permissive_map, type->value, if (ebitmap_set_bit(&db->permissive_map, type->value,
permissive)) { permissive)) {
pr_info("Could not set bit in permissive map\n"); pr_info("Could not set bit in permissive map\n");
return false; return false;
} }
@@ -909,7 +909,7 @@ static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
* HISI_SELINUX_EBITMAP_RO is Huawei's unique features. * HISI_SELINUX_EBITMAP_RO is Huawei's unique features.
*/ */
struct ebitmap *sattr = &db->type_attr_map[type->value - 1], struct ebitmap *sattr = &db->type_attr_map[type->value - 1],
HISI_SELINUX_EBITMAP_RO; HISI_SELINUX_EBITMAP_RO;
#else #else
struct ebitmap *sattr = struct ebitmap *sattr =
flex_array_get(db->type_attr_map_array, type->value - 1); flex_array_get(db->type_attr_map_array, type->value - 1);
@@ -925,8 +925,8 @@ static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
for (n = cls->constraints; n; n = n->next) { for (n = cls->constraints; n; n = n->next) {
for (e = n->expr; e; e = e->next) { for (e = n->expr; e; e = e->next) {
if (e->expr_type == CEXPR_NAMES && if (e->expr_type == CEXPR_NAMES &&
ebitmap_get_bit(&e->type_names->types, ebitmap_get_bit(&e->type_names->types,
attr->value - 1)) { attr->value - 1)) {
ebitmap_set_bit(&e->names, ebitmap_set_bit(&e->names,
type->value - 1, 1); type->value - 1, 1);
} }
@@ -936,7 +936,7 @@ static void add_typeattribute_raw(struct policydb *db, struct type_datum *type,
} }
static bool add_typeattribute(struct policydb *db, const char *type, static bool add_typeattribute(struct policydb *db, const char *type,
const char *attr) const char *attr)
{ {
struct type_datum *type_d = symtab_search(&db->p_types, type); struct type_datum *type_d = symtab_search(&db->p_types, type);
if (type_d == NULL) { if (type_d == NULL) {
@@ -995,19 +995,19 @@ bool ksu_exists(struct policydb *db, const char *type)
// Access vector rules // Access vector rules
bool ksu_allow(struct policydb *db, const char *src, const char *tgt, bool ksu_allow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm) const char *cls, const char *perm)
{ {
return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, false); return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, false);
} }
bool ksu_deny(struct policydb *db, const char *src, const char *tgt, bool ksu_deny(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm) const char *cls, const char *perm)
{ {
return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, true); return add_rule(db, src, tgt, cls, perm, AVTAB_ALLOWED, true);
} }
bool ksu_auditallow(struct policydb *db, const char *src, const char *tgt, bool ksu_auditallow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm) const char *cls, const char *perm)
{ {
return add_rule(db, src, tgt, cls, perm, AVTAB_AUDITALLOW, false); return add_rule(db, src, tgt, cls, perm, AVTAB_AUDITALLOW, false);
} }
@@ -1019,24 +1019,24 @@ bool ksu_dontaudit(struct policydb *db, const char *src, const char *tgt,
// Extended permissions access vector rules // Extended permissions access vector rules
bool ksu_allowxperm(struct policydb *db, const char *src, const char *tgt, bool ksu_allowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range) const char *cls, const char *range)
{ {
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_ALLOWED, return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_ALLOWED,
false); false);
} }
bool ksu_auditallowxperm(struct policydb *db, const char *src, const char *tgt, bool ksu_auditallowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range) const char *cls, const char *range)
{ {
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_AUDITALLOW, return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_AUDITALLOW,
false); false);
} }
bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt, bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range) const char *cls, const char *range)
{ {
return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_DONTAUDIT, return add_xperm_rule(db, src, tgt, cls, range, AVTAB_XPERMS_DONTAUDIT,
false); false);
} }
// Type rules // Type rules
@@ -1051,13 +1051,13 @@ bool ksu_type_transition(struct policydb *db, const char *src, const char *tgt,
} }
bool ksu_type_change(struct policydb *db, const char *src, const char *tgt, bool ksu_type_change(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def) const char *cls, const char *def)
{ {
return add_type_rule(db, src, tgt, cls, def, AVTAB_CHANGE); return add_type_rule(db, src, tgt, cls, def, AVTAB_CHANGE);
} }
bool ksu_type_member(struct policydb *db, const char *src, const char *tgt, bool ksu_type_member(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def) const char *cls, const char *def)
{ {
return add_type_rule(db, src, tgt, cls, def, AVTAB_MEMBER); return add_type_rule(db, src, tgt, cls, def, AVTAB_MEMBER);
} }

View File

@@ -15,17 +15,17 @@ bool ksu_exists(struct policydb *db, const char *type);
// Access vector rules
bool ksu_allow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_deny(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_auditallow(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
bool ksu_dontaudit(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *perm);
// Extended permissions access vector rules
bool ksu_allowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range);
bool ksu_auditallowxperm(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *range);
bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt,
@@ -35,9 +35,9 @@ bool ksu_dontauditxperm(struct policydb *db, const char *src, const char *tgt,
bool ksu_type_transition(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def, const char *obj);
bool ksu_type_change(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def);
bool ksu_type_member(struct policydb *db, const char *src, const char *tgt,
const char *cls, const char *def);
// File system labeling
bool ksu_genfscon(struct policydb *db, const char *fs_name, const char *path,

View File

@@ -1,328 +0,0 @@
#include <linux/compiler.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/sched/signal.h>
#endif
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/thread_info.h>
#include <linux/seccomp.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uidgid.h>
#ifdef CONFIG_KSU_SUSFS
#include <linux/susfs.h>
#endif // #ifdef CONFIG_KSU_SUSFS
#include "allowlist.h"
#include "setuid_hook.h"
#include "feature.h"
#include "klog.h" // IWYU pragma: keep
#include "manager.h"
#include "selinux/selinux.h"
#include "seccomp_cache.h"
#include "supercalls.h"
#ifndef CONFIG_KSU_SUSFS
#include "syscall_hook_manager.h"
#endif
#include "kernel_umount.h"
#include "sulog.h"
#ifdef CONFIG_KSU_SUSFS
static inline bool is_zygote_isolated_service_uid(uid_t uid)
{
uid %= 100000;
return (uid >= 99000 && uid < 100000);
}
static inline bool is_zygote_normal_app_uid(uid_t uid)
{
uid %= 100000;
return (uid >= 10000 && uid < 19999);
}
extern u32 susfs_zygote_sid;
#ifdef CONFIG_KSU_SUSFS_SUS_PATH
extern void susfs_run_sus_path_loop(uid_t uid);
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_PATH
#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
extern void susfs_reorder_mnt_id(void);
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
#endif // #ifdef CONFIG_KSU_SUSFS
static bool ksu_enhanced_security_enabled = false;
static int enhanced_security_feature_get(u64 *value)
{
*value = ksu_enhanced_security_enabled ? 1 : 0;
return 0;
}
static int enhanced_security_feature_set(u64 value)
{
bool enable = value != 0;
ksu_enhanced_security_enabled = enable;
pr_info("enhanced_security: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler enhanced_security_handler = {
.feature_id = KSU_FEATURE_ENHANCED_SECURITY,
.name = "enhanced_security",
.get_handler = enhanced_security_feature_get,
.set_handler = enhanced_security_feature_set,
};
static inline bool is_allow_su(void)
{
if (is_manager()) {
// we are manager, allow!
return true;
}
return ksu_is_allow_uid_for_current(current_uid().val);
}
// force_sig kcompat, TODO: move it out of core_hook.c
// https://elixir.bootlin.com/linux/v5.3-rc1/source/kernel/signal.c#L1613
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
#define __force_sig(sig) force_sig(sig)
#else
#define __force_sig(sig) force_sig(sig, current)
#endif
extern void disable_seccomp(struct task_struct *tsk);
#ifndef CONFIG_KSU_SUSFS
int ksu_handle_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
// we rely on the fact that zygote always calls setresuid(3) with the same uids
uid_t new_uid = ruid;
uid_t old_uid = current_uid().val;
if (old_uid != new_uid)
pr_info("handle_setresuid from %d to %d\n", old_uid, new_uid);
// if old process is root, ignore it.
if (old_uid != 0 && ksu_enhanced_security_enabled) {
// disallow any non-ksu domain escalation from non-root to root!
// euid is what we care about here as it controls permission
if (unlikely(euid == 0)) {
if (!is_ksu_domain()) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid,
new_uid);
__force_sig(SIGKILL);
return 0;
}
}
// disallow appuid decrease to any other uid if it is not allowed to su
if (is_appuid(old_uid)) {
if (euid < current_euid().val &&
!ksu_is_allow_uid_for_current(old_uid)) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid,
new_uid);
__force_sig(SIGKILL);
return 0;
}
}
return 0;
}
// if on private space, see if its possibly the manager
if (new_uid > PER_USER_RANGE &&
new_uid % PER_USER_RANGE == ksu_get_manager_uid()) {
ksu_set_manager_uid(new_uid);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for ksu manager(uid=%d)\n", new_uid);
ksu_install_fd();
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
ksu_set_task_tracepoint_flag(current);
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
if (ksu_is_allow_uid_for_current(new_uid)) {
if (current->seccomp.mode == SECCOMP_MODE_FILTER &&
current->seccomp.filter) {
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter,
__NR_reboot);
spin_unlock_irq(&current->sighand->siglock);
}
ksu_set_task_tracepoint_flag(current);
} else {
ksu_clear_task_tracepoint_flag_if_needed(current);
}
#else
if (ksu_is_allow_uid_for_current(new_uid)) {
spin_lock_irq(&current->sighand->siglock);
disable_seccomp(current);
spin_unlock_irq(&current->sighand->siglock);
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for ksu manager(uid=%d)\n",
new_uid);
ksu_install_fd();
}
return 0;
}
#endif
// Handle kernel umount
ksu_handle_umount(old_uid, new_uid);
return 0;
}
#else
int ksu_handle_setresuid(uid_t ruid, uid_t euid, uid_t suid){
// we rely on the fact that zygote always calls setresuid(3) with the same uids
uid_t new_uid = ruid;
uid_t old_uid = current_uid().val;
// if old process is root, ignore it.
if (old_uid != 0 && ksu_enhanced_security_enabled) {
// disallow any non-ksu domain escalation from non-root to root!
// euid is what we care about here as it controls permission
if (unlikely(euid == 0)) {
if (!is_ksu_domain()) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid, new_uid);
__force_sig(SIGKILL);
return 0;
}
}
// disallow appuid decrease to any other uid if it is not allowed to su
if (is_appuid(old_uid)) {
if (euid < current_euid().val && !ksu_is_allow_uid_for_current(old_uid)) {
pr_warn("find suspicious EoP: %d %s, from %d to %d\n",
current->pid, current->comm, old_uid, new_uid);
__force_sig(SIGKILL);
return 0;
}
}
return 0;
}
// We are only interested in processes spawned by zygote
if (!susfs_is_sid_equal(current_cred()->security, susfs_zygote_sid)) {
return 0;
}
#if __SULOG_GATE
ksu_sulog_report_syscall(new_uid, NULL, "setuid", NULL);
#endif
#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
// Check if spawned process is isolated service first, and force to do umount if so
if (is_zygote_isolated_service_uid(new_uid)) {
goto do_umount;
}
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
// - Since the ksu manager app uid is excluded from allow_list_arr, ksu_uid_should_umount(manager_uid)
// will always return true; that's why we need to explicitly check whether new_uid belongs to
// the ksu manager
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for ksu manager(uid=%d)\n", new_uid);
ksu_install_fd();
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter, __NR_reboot);
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
if (ksu_is_allow_uid_for_current(new_uid)) {
if (current->seccomp.mode == SECCOMP_MODE_FILTER &&
current->seccomp.filter) {
spin_lock_irq(&current->sighand->siglock);
ksu_seccomp_allow_cache(current->seccomp.filter,
__NR_reboot);
spin_unlock_irq(&current->sighand->siglock);
}
} else {
ksu_clear_task_tracepoint_flag_if_needed(current);
}
#else
if (ksu_is_allow_uid_for_current(new_uid)) {
spin_lock_irq(&current->sighand->siglock);
disable_seccomp(current);
spin_unlock_irq(&current->sighand->siglock);
if (ksu_get_manager_uid() == new_uid) {
pr_info("install fd for ksu manager(uid=%d)\n",
new_uid);
ksu_install_fd();
}
return 0;
}
#endif
// Check if spawned process is normal user app and needs to be umounted
if (likely(is_zygote_normal_app_uid(new_uid) && ksu_uid_should_umount(new_uid))) {
goto do_umount;
}
return 0;
do_umount:
// Handle kernel umount
ksu_handle_umount(old_uid, new_uid);
get_task_struct(current);
#ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
// We can reorder the mnt_id now after all sus mounts are umounted
susfs_reorder_mnt_id();
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_MOUNT
susfs_set_current_proc_umounted();
put_task_struct(current);
#ifdef CONFIG_KSU_SUSFS_SUS_PATH
susfs_run_sus_path_loop(new_uid);
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_PATH
return 0;
}
#endif // #ifndef CONFIG_KSU_SUSFS
void ksu_setuid_hook_init(void)
{
ksu_kernel_umount_init();
if (ksu_register_feature_handler(&enhanced_security_handler)) {
pr_err("Failed to register enhanced security feature handler\n");
}
}
void ksu_setuid_hook_exit(void)
{
pr_info("ksu_core_exit\n");
ksu_kernel_umount_exit();
ksu_unregister_feature_handler(KSU_FEATURE_ENHANCED_SECURITY);
}
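The private-space manager detection in ksu_handle_setresuid() above compares the app-id portion of the new uid against the stored manager uid. A minimal standalone sketch of that arithmetic follows; it assumes PER_USER_RANGE is Android's usual per-user uid stride of 100000 (the constant is defined outside this diff), and manager_uid stands in for ksu_get_manager_uid().

/* Standalone userspace sketch of the private-space manager check above.
 * Assumption: PER_USER_RANGE == 100000, as on stock Android. */
#include <stdio.h>

#define PER_USER_RANGE 100000

int main(void)
{
	unsigned int manager_uid = 10123;                   /* manager app id in user 0 */
	unsigned int new_uid = 1 * PER_USER_RANGE + 10123;  /* same app in a work profile */

	/* Same condition as in ksu_handle_setresuid(): a uid above PER_USER_RANGE
	 * whose app id matches the known manager uid is treated as the manager
	 * running in a secondary user. */
	if (new_uid > PER_USER_RANGE && new_uid % PER_USER_RANGE == manager_uid)
		printf("uid %u recognized as manager (app id %u)\n", new_uid, manager_uid);
	return 0;
}

Here 110123 % 100000 == 10123, so the manager uid is updated to the work-profile uid before the fd is installed.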

View File

@@ -1,12 +0,0 @@
#ifndef __KSU_H_KSU_SETUID_HOOK
#define __KSU_H_KSU_SETUID_HOOK
#include <linux/init.h>
#include <linux/types.h>
void ksu_setuid_hook_init(void);
void ksu_setuid_hook_exit(void);
int ksu_handle_setresuid(uid_t ruid, uid_t euid, uid_t suid);
#endif

View File

@@ -4,77 +4,77 @@ set -eu
GKI_ROOT=$(pwd)
display_usage() {
    echo "Usage: $0 [--cleanup | <commit-or-tag>]"
    echo " --cleanup: Cleans up previous modifications made by the script."
    echo " <commit-or-tag>: Sets up or updates the KernelSU to specified tag or commit."
    echo " -h, --help: Displays this usage information."
    echo " (no args): Sets up or updates the KernelSU environment to the latest tagged version."
}
initialize_variables() {
    if test -d "$GKI_ROOT/common/drivers"; then
        DRIVER_DIR="$GKI_ROOT/common/drivers"
    elif test -d "$GKI_ROOT/drivers"; then
        DRIVER_DIR="$GKI_ROOT/drivers"
    else
        echo '[ERROR] "drivers/" directory not found.'
        exit 127
    fi
    DRIVER_MAKEFILE=$DRIVER_DIR/Makefile
    DRIVER_KCONFIG=$DRIVER_DIR/Kconfig
}
# Reverts modifications made by this script
perform_cleanup() {
    echo "[+] Cleaning up..."
    [ -L "$DRIVER_DIR/kernelsu" ] && rm "$DRIVER_DIR/kernelsu" && echo "[-] Symlink removed."
    grep -q "kernelsu" "$DRIVER_MAKEFILE" && sed -i '/kernelsu/d' "$DRIVER_MAKEFILE" && echo "[-] Makefile reverted."
    grep -q "drivers/kernelsu/Kconfig" "$DRIVER_KCONFIG" && sed -i '/drivers\/kernelsu\/Kconfig/d' "$DRIVER_KCONFIG" && echo "[-] Kconfig reverted."
    if [ -d "$GKI_ROOT/KernelSU" ]; then
        rm -rf "$GKI_ROOT/KernelSU" && echo "[-] KernelSU directory deleted."
    fi
}
# Sets up or updates the KernelSU environment
setup_kernelsu() {
    echo "[+] Setting up KernelSU..."
    # Clone the repository and rename it to KernelSU
    if [ ! -d "$GKI_ROOT/KernelSU" ]; then
        git clone https://github.com/SukiSU-Ultra/SukiSU-Ultra SukiSU-Ultra
        mv SukiSU-Ultra KernelSU
        echo "[+] Repository cloned and renamed to KernelSU."
    fi
    cd "$GKI_ROOT/KernelSU"
    git stash && echo "[-] Stashed current changes."
    if [ "$(git status | grep -Po 'v\d+(\.\d+)*' | head -n1)" ]; then
        git checkout main && echo "[-] Switched to main branch."
    fi
    git pull && echo "[+] Repository updated."
    if [ -z "${1-}" ]; then
        git checkout "$(git describe --abbrev=0 --tags)" && echo "[-] Checked out latest tag."
    else
        git checkout "$1" && echo "[-] Checked out $1." || echo "[-] Checkout default branch"
    fi
    cd "$DRIVER_DIR"
    ln -sf "$(realpath --relative-to="$DRIVER_DIR" "$GKI_ROOT/KernelSU/kernel")" "kernelsu" && echo "[+] Symlink created."
    # Add entries in Makefile and Kconfig if not already existing
    grep -q "kernelsu" "$DRIVER_MAKEFILE" || printf "\nobj-\$(CONFIG_KSU) += kernelsu/\n" >> "$DRIVER_MAKEFILE" && echo "[+] Modified Makefile."
    grep -q "source \"drivers/kernelsu/Kconfig\"" "$DRIVER_KCONFIG" || sed -i "/endmenu/i\source \"drivers/kernelsu/Kconfig\"" "$DRIVER_KCONFIG" && echo "[+] Modified Kconfig."
    echo '[+] Done.'
}
# Process command-line arguments
if [ "$#" -eq 0 ]; then
    initialize_variables
    setup_kernelsu
elif [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    display_usage
elif [ "$1" = "--cleanup" ]; then
    initialize_variables
    perform_cleanup
else
    initialize_variables
    setup_kernelsu "$@"
fi
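As the argument handling above implies, a typical run happens from the kernel source root: ./setup.sh with no arguments clones SukiSU-Ultra, checks out the latest tag, and symlinks it into drivers/kernelsu; ./setup.sh <commit-or-tag> (for example a hypothetical v1.0.5) pins a specific revision; and ./setup.sh --cleanup removes the symlink and reverts the Makefile and Kconfig edits.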

View File

@@ -1,8 +1,10 @@
#include "linux/compiler.h" #include <linux/dcache.h>
#include "linux/printk.h" #include <linux/security.h>
#include <asm/current.h> #include <asm/current.h>
#include <linux/cred.h> #include <linux/cred.h>
#include <linux/err.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/version.h> #include <linux/version.h>
@@ -12,97 +14,70 @@
#else #else
#include <linux/sched.h> #include <linux/sched.h>
#endif #endif
#ifdef CONFIG_KSU_SUSFS_SUS_SU
#include <linux/susfs_def.h>
#endif
#ifdef CONFIG_KSU_SUSFS
#include <linux/namei.h>
#include "objsec.h" #include "objsec.h"
#endif // #ifdef CONFIG_KSU_SUSFS
#include "allowlist.h" #include "allowlist.h"
#include "feature.h" #include "arch.h"
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "ksud.h" #include "ksud.h"
#include "sucompat.h" #include "kernel_compat.h"
#include "app_profile.h"
#ifndef CONFIG_KSU_SUSFS
#include "syscall_hook_manager.h"
#endif // #ifndef CONFIG_KSU_SUSFS
#include "sulog.h"
#define SU_PATH "/system/bin/su" #define SU_PATH "/system/bin/su"
#define SH_PATH "/system/bin/sh" #define SH_PATH "/system/bin/sh"
bool ksu_su_compat_enabled __read_mostly = true; static const char sh_path[] = "/system/bin/sh";
static const char ksud_path[] = KSUD_PATH;
static const char su[] = SU_PATH;
static int su_compat_feature_get(u64 *value) bool ksu_sucompat_hook_state __read_mostly = true;
static inline void __user *userspace_stack_buffer(const void *d, size_t len)
{ {
*value = ksu_su_compat_enabled ? 1 : 0; /* To avoid having to mmap a page in userspace, just write below the stack
return 0; * pointer. */
}
static int su_compat_feature_set(u64 value)
{
bool enable = value != 0;
ksu_su_compat_enabled = enable;
pr_info("su_compat: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler su_compat_handler = {
.feature_id = KSU_FEATURE_SU_COMPAT,
.name = "su_compat",
.get_handler = su_compat_feature_get,
.set_handler = su_compat_feature_set,
};
static void __user *userspace_stack_buffer(const void *d, size_t len)
{
// To avoid having to mmap a page in userspace, just write below the stack
// pointer.
char __user *p = (void __user *)current_user_stack_pointer() - len; char __user *p = (void __user *)current_user_stack_pointer() - len;
return copy_to_user(p, d, len) ? NULL : p; return copy_to_user(p, d, len) ? NULL : p;
} }
static char __user *sh_user_path(void) static inline char __user *sh_user_path(void)
{ {
static const char sh_path[] = "/system/bin/sh";
return userspace_stack_buffer(sh_path, sizeof(sh_path)); return userspace_stack_buffer(sh_path, sizeof(sh_path));
} }
static char __user *ksud_user_path(void) static inline char __user *ksud_user_path(void)
{ {
static const char ksud_path[] = KSUD_PATH;
return userspace_stack_buffer(ksud_path, sizeof(ksud_path)); return userspace_stack_buffer(ksud_path, sizeof(ksud_path));
} }
#ifndef CONFIG_KSU_SUSFS
int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode, int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
int *__unused_flags) int *__unused_flags)
{ {
const char su[] = SU_PATH;
#ifdef KSU_MANUAL_HOOK #ifndef CONFIG_KSU_KPROBES_HOOK
if (!ksu_su_compat_enabled) { if (!ksu_sucompat_hook_state) {
return 0; return 0;
} }
#endif #endif
if (!ksu_is_allow_uid_for_current(current_uid().val)) { #ifndef CONFIG_KSU_SUSFS_SUS_SU
if (!ksu_is_allow_uid(current_uid().val)) {
return 0; return 0;
} }
#endif
#ifdef CONFIG_KSU_SUSFS_SUS_SU
char path[sizeof(su) + 1] = {0};
#else
char path[sizeof(su) + 1]; char path[sizeof(su) + 1];
memset(path, 0, sizeof(path)); memset(path, 0, sizeof(path));
#endif
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path)); ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su, sizeof(su)))) { if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
ksu_sulog_report_syscall(current_uid().val, NULL, "faccessat", path);
#endif
pr_info("faccessat su->sh!\n"); pr_info("faccessat su->sh!\n");
*filename_user = sh_user_path(); *filename_user = sh_user_path();
} }
@@ -110,86 +85,75 @@ int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
return 0; return 0;
} }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) && defined(CONFIG_KSU_SUSFS_SUS_SU)
struct filename* susfs_ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags) {
struct filename *name = getname_flags(*filename_user, getname_statx_lookup_flags(*flags), NULL);
if (unlikely(IS_ERR(name) || name->name == NULL)) {
return name;
}
if (likely(memcmp(name->name, su, sizeof(su)))) {
return name;
}
const char sh[] = SH_PATH;
pr_info("vfs_fstatat su->sh!\n");
memcpy((void *)name->name, sh, sizeof(sh));
return name;
}
#endif
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags) int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags)
{ {
// const char sh[] = SH_PATH;
const char su[] = SU_PATH;
#ifdef KSU_MANUAL_HOOK #ifndef CONFIG_KSU_KPROBES_HOOK
if (!ksu_su_compat_enabled) { if (!ksu_sucompat_hook_state) {
return 0; return 0;
} }
#endif #endif
if (!ksu_is_allow_uid_for_current(current_uid().val)) {
#ifndef CONFIG_KSU_SUSFS_SUS_SU
if (!ksu_is_allow_uid(current_uid().val)) {
return 0; return 0;
} }
#endif
if (unlikely(!filename_user)) { if (unlikely(!filename_user)) {
return 0; return 0;
} }
#ifdef CONFIG_KSU_SUSFS_SUS_SU
char path[sizeof(su) + 1] = {0};
#else
char path[sizeof(su) + 1]; char path[sizeof(su) + 1];
memset(path, 0, sizeof(path)); memset(path, 0, sizeof(path));
#endif
// Remove this later!! we use syscall hook, so this will never happen!!!!!
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) && 0
// it becomes a `struct filename *` after 5.18
// https://elixir.bootlin.com/linux/v5.18/source/fs/stat.c#L216
const char sh[] = SH_PATH;
struct filename *filename = *((struct filename **)filename_user);
if (IS_ERR(filename)) {
return 0;
}
if (likely(memcmp(filename->name, su, sizeof(su))))
return 0;
pr_info("vfs_statx su->sh!\n");
memcpy((void *)filename->name, sh, sizeof(sh));
#else
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path)); ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su, sizeof(su)))) { if (unlikely(!memcmp(path, su, sizeof(su)))) {
#if __SULOG_GATE
ksu_sulog_report_syscall(current_uid().val, NULL, "newfstatat", path);
#endif
pr_info("newfstatat su->sh!\n"); pr_info("newfstatat su->sh!\n");
*filename_user = sh_user_path(); *filename_user = sh_user_path();
} }
#endif
return 0; return 0;
} }
int ksu_handle_execve_sucompat(const char __user **filename_user,
void *__never_use_argv, void *__never_use_envp,
int *__never_use_flags)
{
const char su[] = SU_PATH;
char path[sizeof(su) + 1];
#ifdef KSU_MANUAL_HOOK
if (!ksu_su_compat_enabled){
return 0;
}
#endif
if (unlikely(!filename_user))
return 0;
memset(path, 0, sizeof(path));
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (likely(memcmp(path, su, sizeof(su))))
return 0;
#if __SULOG_GATE
bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val);
ksu_sulog_report_syscall(current_uid().val, NULL, "execve", path);
if (!is_allowed)
return 0;
ksu_sulog_report_su_attempt(current_uid().val, NULL, path, is_allowed);
#else
if (!ksu_is_allow_uid_for_current(current_uid().val)) {
return 0;
}
#endif
pr_info("sys_execve su found\n");
*filename_user = ksud_user_path();
escape_with_root_profile();
return 0;
}
#else
static const char sh_path[] = SH_PATH;
static const char su_path[] = SU_PATH;
static const char ksud_path[] = KSUD_PATH;
// the call from execve_handler_pre won't provide a correct value for __never_use_argument; use them after fixing execve_handler_pre, keeping them for consistency with manually patched code
int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr, int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr,
void *__never_use_argv, void *__never_use_envp, void *__never_use_argv, void *__never_use_envp,
@@ -197,9 +161,11 @@ int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr,
{ {
struct filename *filename; struct filename *filename;
if (!ksu_su_compat_enabled){ #ifndef CONFIG_KSU_KPROBES_HOOK
if (!ksu_sucompat_hook_state) {
return 0; return 0;
} }
#endif
if (unlikely(!filename_ptr)) if (unlikely(!filename_ptr))
return 0; return 0;
@@ -209,120 +175,80 @@ int ksu_handle_execveat_sucompat(int *fd, struct filename **filename_ptr,
return 0; return 0;
} }
if (likely(memcmp(filename->name, su_path, sizeof(su_path)))) if (likely(memcmp(filename->name, su, sizeof(su))))
return 0; return 0;
#if __SULOG_GATE #ifndef CONFIG_KSU_SUSFS_SUS_SU
bool is_allowed = ksu_is_allow_uid_for_current(current_uid().val); if (!ksu_is_allow_uid(current_uid().val))
ksu_sulog_report_syscall(current_uid().val, NULL, "execve", su_path); return 0;
ksu_sulog_report_su_attempt(current_uid().val, NULL, su_path, is_allowed);
#endif #endif
pr_info("do_execveat_common su found\n"); pr_info("do_execveat_common su found\n");
memcpy((void *)filename->name, ksud_path, sizeof(ksud_path)); memcpy((void *)filename->name, ksud_path, sizeof(ksud_path));
escape_with_root_profile(); escape_to_root(true);
return 0; return 0;
} }
int ksu_handle_execveat(int *fd, struct filename **filename_ptr, void *argv, int ksu_handle_execve_sucompat(int *fd, const char __user **filename_user,
void *envp, int *flags) void *__never_use_argv, void *__never_use_envp,
int *__never_use_flags)
{ {
if (ksu_handle_execveat_ksud(fd, filename_ptr, argv, envp, flags)) { //const char su[] = SU_PATH;
return 0; #ifdef CONFIG_KSU_SUSFS_SUS_SU
} char path[sizeof(su) + 1] = {0};
return ksu_handle_execveat_sucompat(fd, filename_ptr, argv, envp,
flags);
}
int ksu_handle_faccessat(int *dfd, const char __user **filename_user, int *mode,
int *__unused_flags)
{
char path[sizeof(su_path) + 1] = {0};
if (!ksu_su_compat_enabled){
return 0;
}
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su_path, sizeof(su_path)))) {
#if __SULOG_GATE
ksu_sulog_report_syscall(current_uid().val, NULL, "faccessat", path);
#endif
pr_info("faccessat su->sh!\n");
*filename_user = sh_user_path();
}
return 0;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
int ksu_handle_stat(int *dfd, struct filename **filename, int *flags) {
if (unlikely(IS_ERR(*filename) || (*filename)->name == NULL)) {
return 0;
}
if (likely(memcmp((*filename)->name, su_path, sizeof(su_path)))) {
return 0;
}
pr_info("ksu_handle_stat: su->sh!\n");
memcpy((void *)((*filename)->name), sh_path, sizeof(sh_path));
return 0;
}
#else #else
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags) char path[sizeof(su) + 1];
{
if (!ksu_su_compat_enabled){
return 0;
}
if (unlikely(!filename_user)) {
return 0;
}
char path[sizeof(su_path) + 1] = {0};
// Remove this later!! we use syscall hook, so this will never happen!!!!!
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) && 0
// it becomes a `struct filename *` after 5.18
// https://elixir.bootlin.com/linux/v5.18/source/fs/stat.c#L216
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)
struct filename *filename = *((struct filename **)filename_user);
#endif #endif
if (IS_ERR(filename)) { #ifndef CONFIG_KSU_KPROBES_HOOK
if (!ksu_sucompat_hook_state) {
return 0; return 0;
} }
if (likely(memcmp(filename->name, su_path, sizeof(su_path)))) #endif
return 0;
pr_info("ksu_handle_stat: su->sh!\n");
memcpy((void *)filename->name, sh_path, sizeof(sh_path));
#else
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (unlikely(!memcmp(path, su_path, sizeof(su_path)))) { if (unlikely(!filename_user))
#if __SULOG_GATE return 0;
ksu_sulog_report_syscall(current_uid().val, NULL, "newfstatat", path);
#endif /*
pr_info("ksu_handle_stat: su->sh!\n"); * nofault variant fails silently due to pagefault_disable
*filename_user = sh_user_path(); * some cpus dont really have that good speculative execution
} * access_ok to substitute set_fs, we check if pointer is accessible
#endif */
if (!ksu_access_ok(*filename_user, sizeof(path)))
return 0;
// success = returns number of bytes and should be less than path
long len = strncpy_from_user(path, *filename_user, sizeof(path));
if (len <= 0 || len > sizeof(path))
return 0;
// strncpy_from_user_nofault does this too
path[sizeof(path) - 1] = '\0';
if (likely(memcmp(path, su, sizeof(su))))
return 0;
if (!ksu_is_allow_uid(current_uid().val))
return 0;
pr_info("sys_execve su found\n");
*filename_user = ksud_user_path();
escape_to_root(true);
return 0; return 0;
} }
#endif // #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
int ksu_handle_devpts(struct inode *inode) int ksu_handle_devpts(struct inode *inode)
{ {
if (!current->mm) {
return 0;
}
if (!ksu_su_compat_enabled){ #ifndef CONFIG_KSU_KPROBES_HOOK
if (!ksu_sucompat_hook_state)
return 0;
#endif
if (!current->mm) {
return 0; return 0;
} }
@@ -332,35 +258,166 @@ int ksu_handle_devpts(struct inode *inode)
return 0; return 0;
} }
if (!__ksu_is_allow_uid_for_current(uid)) if (!ksu_is_allow_uid(uid))
return 0; return 0;
if (ksu_file_sid) { if (ksu_devpts_sid) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || \ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(KSU_OPTIONAL_SELINUX_INODE)
defined(KSU_OPTIONAL_SELINUX_INODE)
struct inode_security_struct *sec = selinux_inode(inode); struct inode_security_struct *sec = selinux_inode(inode);
#else #else
struct inode_security_struct *sec = struct inode_security_struct *sec =
(struct inode_security_struct *)inode->i_security; (struct inode_security_struct *)inode->i_security;
#endif #endif
if (sec) { if (sec) {
sec->sid = ksu_file_sid; sec->sid = ksu_devpts_sid;
} }
} }
return 0; return 0;
} }
#endif // #ifndef CONFIG_KSU_SUSFS
// sucompat: permitted process can execute 'su' to gain root access. #ifdef CONFIG_KSU_KPROBES_HOOK
void ksu_sucompat_init()
static int faccessat_handler_pre(struct kprobe *p, struct pt_regs *regs)
{ {
if (ksu_register_feature_handler(&su_compat_handler)) { struct pt_regs *real_regs = PT_REAL_REGS(regs);
pr_err("Failed to register su_compat feature handler\n"); int *dfd = (int *)&PT_REGS_PARM1(real_regs);
const char __user **filename_user =
(const char **)&PT_REGS_PARM2(real_regs);
int *mode = (int *)&PT_REGS_PARM3(real_regs);
return ksu_handle_faccessat(dfd, filename_user, mode, NULL);
}
static int newfstatat_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct pt_regs *real_regs = PT_REAL_REGS(regs);
int *dfd = (int *)&PT_REGS_PARM1(real_regs);
const char __user **filename_user =
(const char **)&PT_REGS_PARM2(real_regs);
int *flags = (int *)&PT_REGS_SYSCALL_PARM4(real_regs);
return ksu_handle_stat(dfd, filename_user, flags);
}
static int execve_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct pt_regs *real_regs = PT_REAL_REGS(regs);
const char __user **filename_user =
(const char **)&PT_REGS_PARM1(real_regs);
return ksu_handle_execve_sucompat(AT_FDCWD, filename_user, NULL, NULL,
NULL);
}
static struct kprobe *su_kps[6];
static int pts_unix98_lookup_pre(struct kprobe *p, struct pt_regs *regs)
{
struct inode *inode;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
struct file *file = (struct file *)PT_REGS_PARM2(regs);
inode = file->f_path.dentry->d_inode;
#else
inode = (struct inode *)PT_REGS_PARM2(regs);
#endif
return ksu_handle_devpts(inode);
}
static struct kprobe *init_kprobe(const char *name,
kprobe_pre_handler_t handler)
{
struct kprobe *kp = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
if (!kp)
return NULL;
kp->symbol_name = name;
kp->pre_handler = handler;
int ret = register_kprobe(kp);
pr_info("sucompat: register_%s kprobe: %d\n", name, ret);
if (ret) {
kfree(kp);
return NULL;
}
return kp;
}
static void destroy_kprobe(struct kprobe **kp_ptr)
{
struct kprobe *kp = *kp_ptr;
if (!kp)
return;
unregister_kprobe(kp);
synchronize_rcu();
kfree(kp);
*kp_ptr = NULL;
}
#endif
// sucompat: permitted process can execute 'su' to gain root access.
void ksu_sucompat_init(void)
{
#ifdef CONFIG_KSU_KPROBES_HOOK
su_kps[0] = init_kprobe(SYS_EXECVE_SYMBOL, execve_handler_pre);
su_kps[1] = init_kprobe(SYS_EXECVE_COMPAT_SYMBOL, execve_handler_pre);
su_kps[2] = init_kprobe(SYS_FACCESSAT_SYMBOL, faccessat_handler_pre);
su_kps[3] = init_kprobe(SYS_NEWFSTATAT_SYMBOL, newfstatat_handler_pre);
su_kps[4] = init_kprobe(SYS_FSTATAT64_SYMBOL, newfstatat_handler_pre);
su_kps[5] = init_kprobe("pts_unix98_lookup", pts_unix98_lookup_pre);
#else
ksu_sucompat_hook_state = true;
pr_info("ksu_sucompat init\n");
#endif
}
void ksu_sucompat_exit(void)
{
#ifdef CONFIG_KSU_KPROBES_HOOK
int i;
for (i = 0; i < ARRAY_SIZE(su_kps); i++) {
destroy_kprobe(&su_kps[i]);
}
#else
ksu_sucompat_hook_state = false;
pr_info("ksu_sucompat exit\n");
#endif
}
#ifdef CONFIG_KSU_SUSFS_SUS_SU
extern bool ksu_su_compat_enabled;
bool ksu_devpts_hook = false;
bool susfs_is_sus_su_hooks_enabled __read_mostly = false;
int susfs_sus_su_working_mode = 0;
static bool ksu_is_su_kps_enabled(void) {
int i;
for (i = 0; i < ARRAY_SIZE(su_kps); i++) {
if (su_kps[i]) {
return true;
}
}
return false;
}
void ksu_susfs_disable_sus_su(void) {
susfs_is_sus_su_hooks_enabled = false;
ksu_devpts_hook = false;
susfs_sus_su_working_mode = SUS_SU_DISABLED;
// Re-enable su_kps for the user; users need to toggle the kprobe hooks off again in the KSU manager if they want them disabled.
if (!ksu_is_su_kps_enabled()) {
ksu_sucompat_init();
ksu_su_compat_enabled = true;
} }
} }
void ksu_sucompat_exit() void ksu_susfs_enable_sus_su(void) {
{ if (ksu_is_su_kps_enabled()) {
ksu_unregister_feature_handler(KSU_FEATURE_SU_COMPAT); ksu_sucompat_exit();
ksu_su_compat_enabled = false;
}
susfs_is_sus_su_hooks_enabled = true;
ksu_devpts_hook = true;
susfs_sus_su_working_mode = SUS_SU_WITH_HOOKS;
} }
#endif // #ifdef CONFIG_KSU_SUSFS_SUS_SU

View File

@@ -1,22 +0,0 @@
#ifndef __KSU_H_SUCOMPAT
#define __KSU_H_SUCOMPAT
#include <linux/types.h>
extern bool ksu_su_compat_enabled;
void ksu_sucompat_init(void);
void ksu_sucompat_exit(void);
// Handler functions exported for hook_manager
int ksu_handle_faccessat(int *dfd, const char __user **filename_user,
int *mode, int *__unused_flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) && defined(CONFIG_KSU_SUSFS)
int ksu_handle_stat(int *dfd, struct filename **filename, int *flags);
#else
int ksu_handle_stat(int *dfd, const char __user **filename_user, int *flags);
#endif // #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) && defined(CONFIG_KSU_SUSFS)
int ksu_handle_execve_sucompat(const char __user **filename_user,
void *__never_use_argv, void *__never_use_envp,
int *__never_use_flags);
#endif

View File

@@ -1,369 +0,0 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "sulog.h"
#include "klog.h"
#include "kernel_compat.h"
#include "ksu.h"
#include "feature.h"
#if __SULOG_GATE
struct dedup_entry dedup_tbl[SULOG_COMM_LEN];
static DEFINE_SPINLOCK(dedup_lock);
static LIST_HEAD(sulog_queue);
static struct workqueue_struct *sulog_workqueue;
static struct work_struct sulog_work;
static bool sulog_enabled __read_mostly = true;
static int sulog_feature_get(u64 *value)
{
*value = sulog_enabled ? 1 : 0;
return 0;
}
static int sulog_feature_set(u64 value)
{
bool enable = value != 0;
sulog_enabled = enable;
pr_info("sulog: set to %d\n", enable);
return 0;
}
static const struct ksu_feature_handler sulog_handler = {
.feature_id = KSU_FEATURE_SULOG,
.name = "sulog",
.get_handler = sulog_feature_get,
.set_handler = sulog_feature_set,
};
static void get_timestamp(char *buf, size_t len)
{
struct timespec64 ts;
struct tm tm;
ktime_get_real_ts64(&ts);
time64_to_tm(ts.tv_sec - sys_tz.tz_minuteswest * 60, 0, &tm);
snprintf(buf, len, "%04ld-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
}
static void ksu_get_cmdline(char *full_comm, const char *comm, size_t buf_len)
{
if (!full_comm || buf_len <= 0)
return;
if (comm && strlen(comm) > 0) {
KSU_STRSCPY(full_comm, comm, buf_len);
return;
}
if (in_atomic() || in_interrupt() || irqs_disabled()) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
if (!current->mm) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
int n = get_cmdline(current, full_comm, buf_len);
if (n <= 0) {
KSU_STRSCPY(full_comm, current->comm, buf_len);
return;
}
for (int i = 0; i < n && i < buf_len - 1; i++) {
if (full_comm[i] == '\0')
full_comm[i] = ' ';
}
full_comm[n < buf_len ? n : buf_len - 1] = '\0';
}
static void sanitize_string(char *str, size_t len)
{
if (!str || len == 0)
return;
size_t read_pos = 0, write_pos = 0;
while (read_pos < len && str[read_pos] != '\0') {
char c = str[read_pos];
if (c == '\n' || c == '\r') {
read_pos++;
continue;
}
if (c == ' ' && write_pos > 0 && str[write_pos - 1] == ' ') {
read_pos++;
continue;
}
str[write_pos++] = c;
read_pos++;
}
str[write_pos] = '\0';
}
static bool dedup_should_print(uid_t uid, u8 type, const char *content, size_t len)
{
struct dedup_key key = {
.crc = dedup_calc_hash(content, len),
.uid = uid,
.type = type,
};
u64 now = ktime_get_ns();
u64 delta_ns = DEDUP_SECS * NSEC_PER_SEC;
u32 idx = key.crc & (SULOG_COMM_LEN - 1);
spin_lock(&dedup_lock);
struct dedup_entry *e = &dedup_tbl[idx];
if (e->key.crc == key.crc &&
e->key.uid == key.uid &&
e->key.type == key.type &&
(now - e->ts_ns) < delta_ns) {
spin_unlock(&dedup_lock);
return false;
}
e->key = key;
e->ts_ns = now;
spin_unlock(&dedup_lock);
return true;
}
static void sulog_work_handler(struct work_struct *work)
{
struct file *fp;
struct sulog_entry *entry, *tmp;
LIST_HEAD(local_queue);
loff_t pos = 0;
unsigned long flags;
spin_lock_irqsave(&dedup_lock, flags);
list_splice_init(&sulog_queue, &local_queue);
spin_unlock_irqrestore(&dedup_lock, flags);
if (list_empty(&local_queue))
return;
fp = ksu_filp_open_compat(SULOG_PATH, O_WRONLY | O_CREAT | O_APPEND, 0640);
if (IS_ERR(fp)) {
pr_err("sulog: failed to open log file: %ld\n", PTR_ERR(fp));
goto cleanup;
}
if (fp->f_inode->i_size > SULOG_MAX_SIZE) {
if (vfs_truncate(&fp->f_path, 0))
pr_err("sulog: failed to truncate log file\n");
pos = 0;
} else {
pos = fp->f_inode->i_size;
}
list_for_each_entry(entry, &local_queue, list)
ksu_kernel_write_compat(fp, entry->content, strlen(entry->content), &pos);
vfs_fsync(fp, 0);
filp_close(fp, 0);
cleanup:
list_for_each_entry_safe(entry, tmp, &local_queue, list) {
list_del(&entry->list);
kfree(entry);
}
}
static void sulog_add_entry(char *log_buf, size_t len, uid_t uid, u8 dedup_type)
{
struct sulog_entry *entry;
unsigned long flags;
if (!sulog_enabled || !log_buf || len == 0)
return;
if (!dedup_should_print(uid, dedup_type, log_buf, len))
return;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
KSU_STRSCPY(entry->content, log_buf, SULOG_ENTRY_MAX_LEN);
spin_lock_irqsave(&dedup_lock, flags);
list_add_tail(&entry->list, &sulog_queue);
spin_unlock_irqrestore(&dedup_lock, flags);
if (sulog_workqueue)
queue_work(sulog_workqueue, &sulog_work);
}
void ksu_sulog_report_su_grant(uid_t uid, const char *comm, const char *method)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SU_GRANT: UID=%d COMM=%s METHOD=%s PID=%d\n",
timestamp, uid, full_comm, method ? method : "unknown", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SU_GRANT);
}
void ksu_sulog_report_su_attempt(uid_t uid, const char *comm, const char *target_path, bool success)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SU_EXEC: UID=%d COMM=%s TARGET=%s RESULT=%s PID=%d\n",
timestamp, uid, full_comm, target_path ? target_path : "unknown",
success ? "SUCCESS" : "DENIED", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SU_ATTEMPT);
}
void ksu_sulog_report_permission_check(uid_t uid, const char *comm, bool allowed)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] PERM_CHECK: UID=%d COMM=%s RESULT=%s PID=%d\n",
timestamp, uid, full_comm, allowed ? "ALLOWED" : "DENIED", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_PERM_CHECK);
}
void ksu_sulog_report_manager_operation(const char *operation, uid_t manager_uid, uid_t target_uid)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, NULL, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] MANAGER_OP: OP=%s MANAGER_UID=%d TARGET_UID=%d COMM=%s PID=%d\n",
timestamp, operation ? operation : "unknown", manager_uid, target_uid, full_comm, current->pid);
sulog_add_entry(log_buf, strlen(log_buf), manager_uid, DEDUP_MANAGER_OP);
}
void ksu_sulog_report_syscall(uid_t uid, const char *comm, const char *syscall, const char *args)
{
char log_buf[SULOG_ENTRY_MAX_LEN];
char timestamp[32];
char full_comm[SULOG_COMM_LEN];
if (!sulog_enabled)
return;
get_timestamp(timestamp, sizeof(timestamp));
ksu_get_cmdline(full_comm, comm, sizeof(full_comm));
sanitize_string(full_comm, sizeof(full_comm));
snprintf(log_buf, sizeof(log_buf),
"[%s] SYSCALL: UID=%d COMM=%s SYSCALL=%s ARGS=%s PID=%d\n",
timestamp, uid, full_comm, syscall ? syscall : "unknown",
args ? args : "none", current->pid);
sulog_add_entry(log_buf, strlen(log_buf), uid, DEDUP_SYSCALL);
}
int ksu_sulog_init(void)
{
if (ksu_register_feature_handler(&sulog_handler)) {
pr_err("Failed to register sulog feature handler\n");
}
sulog_workqueue = alloc_workqueue("ksu_sulog", WQ_UNBOUND | WQ_HIGHPRI, 1);
if (!sulog_workqueue) {
pr_err("sulog: failed to create workqueue\n");
return -ENOMEM;
}
INIT_WORK(&sulog_work, sulog_work_handler);
pr_info("sulog: initialized successfully\n");
return 0;
}
void ksu_sulog_exit(void)
{
struct sulog_entry *entry, *tmp;
unsigned long flags;
ksu_unregister_feature_handler(KSU_FEATURE_SULOG);
sulog_enabled = false;
if (sulog_workqueue) {
flush_workqueue(sulog_workqueue);
destroy_workqueue(sulog_workqueue);
sulog_workqueue = NULL;
}
spin_lock_irqsave(&dedup_lock, flags);
list_for_each_entry_safe(entry, tmp, &sulog_queue, list) {
list_del(&entry->list);
kfree(entry);
}
spin_unlock_irqrestore(&dedup_lock, flags);
pr_info("sulog: cleaned up successfully\n");
}
#endif // __SULOG_GATE
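The deduplication in dedup_should_print() above keys each message on (crc32 of the content, uid, event type) and drops repeats that land in the same hash slot within DEDUP_SECS. A simplified userspace sketch of that windowing follows; locking is omitted, the slot count and window come from sulog.h (SULOG_COMM_LEN = 256, DEDUP_SECS = 10), and now_ns() stands in for the kernel's ktime_get_ns().

/* Simplified userspace sketch of the dedup window in dedup_should_print(). */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define SLOTS 256                                   /* SULOG_COMM_LEN in sulog.h */
#define DEDUP_WINDOW_NS (10ULL * 1000000000ULL)     /* DEDUP_SECS in nanoseconds */

struct entry { uint32_t crc; uint32_t uid; uint8_t type; uint64_t ts_ns; };
static struct entry tbl[SLOTS];

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Returns true if this (crc, uid, type) should be logged: either the slot
 * holds a different key or the previous hit is older than the window. */
static bool should_print(uint32_t crc, uint32_t uid, uint8_t type)
{
	struct entry *e = &tbl[crc & (SLOTS - 1)];  /* power-of-two mask, as in the kernel code */
	uint64_t now = now_ns();

	if (e->crc == crc && e->uid == uid && e->type == type &&
	    now - e->ts_ns < DEDUP_WINDOW_NS)
		return false;                       /* duplicate inside the window: drop */

	e->crc = crc; e->uid = uid; e->type = type; e->ts_ns = now;
	return true;                                /* new or stale key: log it */
}

Because the table is indexed only by the low bits of the CRC, two distinct messages that collide on a slot simply evict each other, which keeps the structure fixed-size and lock hold times short.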

View File

@@ -1,93 +0,0 @@
#ifndef __KSU_SULOG_H
#define __KSU_SULOG_H
#include <linux/types.h>
#include <linux/version.h>
#include <linux/crc32.h> // needed for function dedup_calc_hash
#define __SULOG_GATE 1
#if __SULOG_GATE
extern struct timezone sys_tz;
#define SULOG_PATH "/data/adb/ksu/log/sulog.log"
#define SULOG_MAX_SIZE (32 * 1024 * 1024) // 32MB
#define SULOG_ENTRY_MAX_LEN 512
#define SULOG_COMM_LEN 256
#define DEDUP_SECS 10
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0)
static inline size_t strlcpy(char *dest, const char *src, size_t size)
{
return strscpy(dest, src, size);
}
#endif
#define KSU_STRSCPY(dst, src, size) \
do { \
if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) { \
strscpy(dst, src, size); \
} else { \
strlcpy(dst, src, size); \
} \
} while (0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
#include <linux/rtc.h>
static inline void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
struct rtc_time rtc_tm;
rtc_time64_to_tm(totalsecs, &rtc_tm);
result->tm_sec = rtc_tm.tm_sec;
result->tm_min = rtc_tm.tm_min;
result->tm_hour = rtc_tm.tm_hour;
result->tm_mday = rtc_tm.tm_mday;
result->tm_mon = rtc_tm.tm_mon;
result->tm_year = rtc_tm.tm_year;
}
#endif
struct dedup_key {
u32 crc;
uid_t uid;
u8 type;
u8 _pad[1];
};
struct dedup_entry {
struct dedup_key key;
u64 ts_ns;
};
enum {
DEDUP_SU_GRANT = 0,
DEDUP_SU_ATTEMPT,
DEDUP_PERM_CHECK,
DEDUP_MANAGER_OP,
DEDUP_SYSCALL,
};
static inline u32 dedup_calc_hash(const char *content, size_t len)
{
return crc32(0, content, len);
}
struct sulog_entry {
struct list_head list;
char content[SULOG_ENTRY_MAX_LEN];
};
void ksu_sulog_report_su_grant(uid_t uid, const char *comm, const char *method);
void ksu_sulog_report_su_attempt(uid_t uid, const char *comm, const char *target_path, bool success);
void ksu_sulog_report_permission_check(uid_t uid, const char *comm, bool allowed);
void ksu_sulog_report_manager_operation(const char *operation, uid_t manager_uid, uid_t target_uid);
void ksu_sulog_report_syscall(uid_t uid, const char *comm, const char *syscall, const char *args);
int ksu_sulog_init(void);
void ksu_sulog_exit(void);
#endif // __SULOG_GATE
#endif /* __KSU_SULOG_H */

File diff suppressed because it is too large

View File

@@ -1,197 +0,0 @@
#ifndef __KSU_H_SUPERCALLS
#define __KSU_H_SUPERCALLS
#include <linux/types.h>
#include <linux/ioctl.h>
#include "ksu.h"
#include "app_profile.h"
#ifdef CONFIG_KPM
#include "kpm/kpm.h"
#endif
// Magic numbers for reboot hook to install fd
#define KSU_INSTALL_MAGIC1 0xDEADBEEF
#define KSU_INSTALL_MAGIC2 0xCAFEBABE
// Command structures for ioctl
struct ksu_become_daemon_cmd {
__u8 token[65]; // Input: daemon token (null-terminated)
};
struct ksu_get_info_cmd {
__u32 version; // Output: KERNEL_SU_VERSION
__u32 flags; // Output: flags (bit 0: MODULE mode)
__u32 features; // Output: max feature ID supported
};
struct ksu_report_event_cmd {
__u32 event; // Input: EVENT_POST_FS_DATA, EVENT_BOOT_COMPLETED, etc.
};
struct ksu_set_sepolicy_cmd {
__u64 cmd; // Input: sepolicy command
__aligned_u64 arg; // Input: sepolicy argument pointer
};
struct ksu_check_safemode_cmd {
__u8 in_safe_mode; // Output: true if in safe mode, false otherwise
};
struct ksu_get_allow_list_cmd {
__u32 uids[128]; // Output: array of allowed/denied UIDs
__u32 count; // Output: number of UIDs in array
__u8 allow; // Input: true for allow list, false for deny list
};
struct ksu_uid_granted_root_cmd {
__u32 uid; // Input: target UID to check
__u8 granted; // Output: true if granted, false otherwise
};
struct ksu_uid_should_umount_cmd {
__u32 uid; // Input: target UID to check
__u8 should_umount; // Output: true if should umount, false otherwise
};
struct ksu_get_manager_uid_cmd {
__u32 uid; // Output: manager UID
};
struct ksu_get_app_profile_cmd {
struct app_profile profile; // Input/Output: app profile structure
};
struct ksu_set_app_profile_cmd {
struct app_profile profile; // Input: app profile structure
};
struct ksu_get_feature_cmd {
__u32 feature_id; // Input: feature ID (enum ksu_feature_id)
__u64 value; // Output: feature value/state
__u8 supported; // Output: true if feature is supported, false otherwise
};
struct ksu_set_feature_cmd {
__u32 feature_id; // Input: feature ID (enum ksu_feature_id)
__u64 value; // Input: feature value/state to set
};
struct ksu_get_wrapper_fd_cmd {
__u32 fd; // Input: userspace fd
__u32 flags; // Input: flags of userspace fd
};
struct ksu_manage_mark_cmd {
__u32 operation; // Input: KSU_MARK_*
__s32 pid; // Input: target pid (0 for all processes)
__u32 result; // Output: for get operation - mark status or reg_count
};
#define KSU_MARK_GET 1
#define KSU_MARK_MARK 2
#define KSU_MARK_UNMARK 3
#define KSU_MARK_REFRESH 4
struct ksu_nuke_ext4_sysfs_cmd {
__aligned_u64 arg; // Input: mnt pointer
};
struct ksu_add_try_umount_cmd {
__aligned_u64 arg; // char ptr, this is the mountpoint
__u32 flags; // this is the flag we use for it
__u8 mode; // denotes what to do with it 0:wipe_list 1:add_to_list 2:delete_entry
};
#define KSU_UMOUNT_WIPE 0 // ignore everything and wipe list
#define KSU_UMOUNT_ADD 1 // add entry (path + flags)
#define KSU_UMOUNT_DEL 2 // delete entry, strcmp
// Other command structures
struct ksu_get_full_version_cmd {
char version_full[KSU_FULL_VERSION_STRING]; // Output: full version string
};
struct ksu_hook_type_cmd {
char hook_type[32]; // Output: hook type string
};
struct ksu_enable_kpm_cmd {
__u8 enabled; // Output: true if KPM is enabled
};
struct ksu_dynamic_manager_cmd {
struct dynamic_manager_user_config config; // Input/Output: dynamic manager config
};
struct ksu_get_managers_cmd {
struct manager_list_info manager_info; // Output: manager list information
};
struct ksu_enable_uid_scanner_cmd {
__u32 operation; // Input: operation type (UID_SCANNER_OP_GET_STATUS, UID_SCANNER_OP_TOGGLE, UID_SCANNER_OP_CLEAR_ENV)
__u32 enabled; // Input: enable or disable (for UID_SCANNER_OP_TOGGLE)
void __user *status_ptr; // Input: pointer to store status (for UID_SCANNER_OP_GET_STATUS)
};
#ifdef CONFIG_KSU_MANUAL_SU
struct ksu_manual_su_cmd {
__u32 option; // Input: operation type (MANUAL_SU_OP_GENERATE_TOKEN, MANUAL_SU_OP_ESCALATE, MANUAL_SU_OP_ADD_PENDING)
__u32 target_uid; // Input: target UID
__u32 target_pid; // Input: target PID
char token_buffer[33]; // Input/Output: token buffer
};
#endif
// IOCTL command definitions
#define KSU_IOCTL_GRANT_ROOT _IOC(_IOC_NONE, 'K', 1, 0)
#define KSU_IOCTL_GET_INFO _IOC(_IOC_READ, 'K', 2, 0)
#define KSU_IOCTL_REPORT_EVENT _IOC(_IOC_WRITE, 'K', 3, 0)
#define KSU_IOCTL_SET_SEPOLICY _IOC(_IOC_READ|_IOC_WRITE, 'K', 4, 0)
#define KSU_IOCTL_CHECK_SAFEMODE _IOC(_IOC_READ, 'K', 5, 0)
#define KSU_IOCTL_GET_ALLOW_LIST _IOC(_IOC_READ|_IOC_WRITE, 'K', 6, 0)
#define KSU_IOCTL_GET_DENY_LIST _IOC(_IOC_READ|_IOC_WRITE, 'K', 7, 0)
#define KSU_IOCTL_UID_GRANTED_ROOT _IOC(_IOC_READ|_IOC_WRITE, 'K', 8, 0)
#define KSU_IOCTL_UID_SHOULD_UMOUNT _IOC(_IOC_READ|_IOC_WRITE, 'K', 9, 0)
#define KSU_IOCTL_GET_MANAGER_UID _IOC(_IOC_READ, 'K', 10, 0)
#define KSU_IOCTL_GET_APP_PROFILE _IOC(_IOC_READ|_IOC_WRITE, 'K', 11, 0)
#define KSU_IOCTL_SET_APP_PROFILE _IOC(_IOC_WRITE, 'K', 12, 0)
#define KSU_IOCTL_GET_FEATURE _IOC(_IOC_READ|_IOC_WRITE, 'K', 13, 0)
#define KSU_IOCTL_SET_FEATURE _IOC(_IOC_WRITE, 'K', 14, 0)
#define KSU_IOCTL_GET_WRAPPER_FD _IOC(_IOC_WRITE, 'K', 15, 0)
#define KSU_IOCTL_MANAGE_MARK _IOC(_IOC_READ|_IOC_WRITE, 'K', 16, 0)
#define KSU_IOCTL_NUKE_EXT4_SYSFS _IOC(_IOC_WRITE, 'K', 17, 0)
#define KSU_IOCTL_ADD_TRY_UMOUNT _IOC(_IOC_WRITE, 'K', 18, 0)
// Other IOCTL command definitions
#define KSU_IOCTL_GET_FULL_VERSION _IOC(_IOC_READ, 'K', 100, 0)
#define KSU_IOCTL_HOOK_TYPE _IOC(_IOC_READ, 'K', 101, 0)
#define KSU_IOCTL_ENABLE_KPM _IOC(_IOC_READ, 'K', 102, 0)
#define KSU_IOCTL_DYNAMIC_MANAGER _IOC(_IOC_READ|_IOC_WRITE, 'K', 103, 0)
#define KSU_IOCTL_GET_MANAGERS _IOC(_IOC_READ|_IOC_WRITE, 'K', 104, 0)
#define KSU_IOCTL_ENABLE_UID_SCANNER _IOC(_IOC_READ|_IOC_WRITE, 'K', 105, 0)
#ifdef CONFIG_KSU_MANUAL_SU
#define KSU_IOCTL_MANUAL_SU _IOC(_IOC_READ|_IOC_WRITE, 'K', 106, 0)
#endif
#define KSU_IOCTL_UMOUNT_MANAGER _IOC(_IOC_READ|_IOC_WRITE, 'K', 107, 0)
// IOCTL handler types
typedef int (*ksu_ioctl_handler_t)(void __user *arg);
typedef bool (*ksu_perm_check_t)(void);
// IOCTL command mapping
struct ksu_ioctl_cmd_map {
unsigned int cmd;
const char *name;
ksu_ioctl_handler_t handler;
ksu_perm_check_t perm_check; // Permission check function
};
// Install KSU fd to current process
int ksu_install_fd(void);
void ksu_supercalls_init(void);
void ksu_supercalls_exit(void);
#endif // __KSU_H_SUPERCALLS
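The command set above is driven from userspace through the fd that ksu_install_fd() places into the manager/daemon process. The following is a hedged userspace sketch only: it assumes the caller already holds that fd as ksu_fd, that the structures and KSU_IOCTL_* macros from this header are shared with userspace, and that KSU_FEATURE_SU_COMPAT is the enum value referenced elsewhere in this diff (its definition in feature.h is not shown).

/* Hypothetical userspace sketch of driving the ioctl API above. */
#include <stdio.h>
#include <sys/ioctl.h>

int query_kernel_info(int ksu_fd)
{
	struct ksu_get_info_cmd info = {0};

	if (ioctl(ksu_fd, KSU_IOCTL_GET_INFO, &info) < 0) {
		perror("KSU_IOCTL_GET_INFO");
		return -1;
	}
	printf("version=%u flags=%#x max_feature=%u\n",
	       info.version, info.flags, info.features);

	/* Toggle a feature, e.g. su_compat, via the generic feature commands. */
	struct ksu_set_feature_cmd feat = {
		.feature_id = KSU_FEATURE_SU_COMPAT,  /* assumed enum value from feature.h */
		.value = 1,
	};
	if (ioctl(ksu_fd, KSU_IOCTL_SET_FEATURE, &feat) < 0)
		perror("KSU_IOCTL_SET_FEATURE");
	return 0;
}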

View File

@@ -1,375 +0,0 @@
#include "linux/compiler.h"
#include "linux/cred.h"
#include "linux/printk.h"
#include "selinux/selinux.h"
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/tracepoint.h>
#include <asm/syscall.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <trace/events/syscalls.h>
#include <linux/namei.h>
#include "allowlist.h"
#include "arch.h"
#include "klog.h" // IWYU pragma: keep
#include "syscall_hook_manager.h"
#include "sucompat.h"
#include "setuid_hook.h"
#include "selinux/selinux.h"
// Tracepoint registration count management
// == 1: just us
// > 1: someone else is also using syscall tracepoint e.g. ftrace
static int tracepoint_reg_count = 0;
static DEFINE_SPINLOCK(tracepoint_reg_lock);
void ksu_clear_task_tracepoint_flag_if_needed(struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count <= 1) {
ksu_clear_task_tracepoint_flag(t);
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
}
// Process marking management
static void handle_process_mark(bool mark)
{
struct task_struct *p, *t;
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
if (mark)
ksu_set_task_tracepoint_flag(t);
else
ksu_clear_task_tracepoint_flag(t);
}
read_unlock(&tasklist_lock);
}
void ksu_mark_all_process(void)
{
handle_process_mark(true);
pr_info("hook_manager: mark all user process done!\n");
}
void ksu_unmark_all_process(void)
{
handle_process_mark(false);
pr_info("hook_manager: unmark all user process done!\n");
}
static void ksu_mark_running_process_locked()
{
struct task_struct *p, *t;
read_lock(&tasklist_lock);
for_each_process_thread (p, t) {
if (!t->mm) { // only user processes
continue;
}
int uid = task_uid(t).val;
const struct cred *cred = get_task_cred(t);
bool ksu_root_process =
uid == 0 && is_task_ksu_domain(cred);
bool is_zygote_process = is_zygote(cred);
bool is_shell = uid == 2000;
// before boot is completed, we also mark init so that zygote gets marked
bool is_init = t->pid == 1;
if (ksu_root_process || is_zygote_process || is_shell || is_init
|| ksu_is_allow_uid(uid)) {
ksu_set_task_tracepoint_flag(t);
pr_info("hook_manager: mark process: pid:%d, uid: %d, comm:%s\n",
t->pid, uid, t->comm);
} else {
ksu_clear_task_tracepoint_flag(t);
pr_info("hook_manager: unmark process: pid:%d, uid: %d, comm:%s\n",
t->pid, uid, t->comm);
}
put_cred(cred);
}
read_unlock(&tasklist_lock);
}
void ksu_mark_running_process()
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count <= 1) {
ksu_mark_running_process_locked();
} else {
pr_info("hook_manager: not mark running process since syscall tracepoint is in use\n");
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
}
// Get task mark status
// Returns: 1 if marked, 0 if not marked, -ESRCH if task not found
int ksu_get_task_mark(pid_t pid)
{
struct task_struct *task;
int marked = -ESRCH;
rcu_read_lock();
task = find_task_by_vpid(pid);
if (task) {
get_task_struct(task);
rcu_read_unlock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
marked = test_task_syscall_work(task, SYSCALL_TRACEPOINT) ? 1 : 0;
#else
marked = test_tsk_thread_flag(task, TIF_SYSCALL_TRACEPOINT) ? 1 : 0;
#endif
put_task_struct(task);
} else {
rcu_read_unlock();
}
return marked;
}
// Set task mark status
// Returns: 0 on success, -ESRCH if task not found
int ksu_set_task_mark(pid_t pid, bool mark)
{
struct task_struct *task;
int ret = -ESRCH;
rcu_read_lock();
task = find_task_by_vpid(pid);
if (task) {
get_task_struct(task);
rcu_read_unlock();
if (mark) {
ksu_set_task_tracepoint_flag(task);
pr_info("hook_manager: marked task pid=%d comm=%s\n", pid, task->comm);
} else {
ksu_clear_task_tracepoint_flag(task);
pr_info("hook_manager: unmarked task pid=%d comm=%s\n", pid, task->comm);
}
put_task_struct(task);
ret = 0;
} else {
rcu_read_unlock();
}
return ret;
}
#ifdef CONFIG_KRETPROBES
static struct kretprobe *init_kretprobe(const char *name,
kretprobe_handler_t handler)
{
struct kretprobe *rp = kzalloc(sizeof(struct kretprobe), GFP_KERNEL);
if (!rp)
return NULL;
rp->kp.symbol_name = name;
rp->handler = handler;
rp->data_size = 0;
rp->maxactive = 0;
int ret = register_kretprobe(rp);
pr_info("hook_manager: register_%s kretprobe: %d\n", name, ret);
if (ret) {
kfree(rp);
return NULL;
}
return rp;
}
static void destroy_kretprobe(struct kretprobe **rp_ptr)
{
struct kretprobe *rp = *rp_ptr;
if (!rp)
return;
unregister_kretprobe(rp);
synchronize_rcu();
kfree(rp);
*rp_ptr = NULL;
}
static int syscall_regfunc_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
if (tracepoint_reg_count < 1) {
// when our tracepoint is installed first, mark only our processes
ksu_mark_running_process_locked();
} else if (tracepoint_reg_count == 1) {
// when another tracepoint is added alongside ours, mark all processes
ksu_mark_all_process();
}
tracepoint_reg_count++;
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
return 0;
}
static int syscall_unregfunc_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long flags;
spin_lock_irqsave(&tracepoint_reg_lock, flags);
tracepoint_reg_count--;
if (tracepoint_reg_count <= 0) {
// when no tracepoints are left, unmark all processes
ksu_unmark_all_process();
} else if (tracepoint_reg_count == 1) {
// when only our tracepoint is left, unmark disallowed processes
ksu_mark_running_process_locked();
}
spin_unlock_irqrestore(&tracepoint_reg_lock, flags);
return 0;
}
static struct kretprobe *syscall_regfunc_rp = NULL;
static struct kretprobe *syscall_unregfunc_rp = NULL;
#endif
static inline bool check_syscall_fastpath(int nr)
{
switch (nr) {
case __NR_newfstatat:
case __NR_faccessat:
case __NR_execve:
case __NR_setresuid:
case __NR_clone:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
case __NR_clone3:
#endif
return true;
default:
return false;
}
}
// Unmark init's children that are not zygote, adbd or ksud
int ksu_handle_init_mark_tracker(const char __user **filename_user)
{
char path[64];
if (unlikely(!filename_user))
return 0;
memset(path, 0, sizeof(path));
ksu_strncpy_from_user_nofault(path, *filename_user, sizeof(path));
if (likely(strstr(path, "/app_process") == NULL && strstr(path, "/adbd") == NULL && strstr(path, "/ksud") == NULL)) {
pr_info("hook_manager: unmark %d exec %s", current->pid, path);
ksu_clear_task_tracepoint_flag_if_needed(current);
}
return 0;
}
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
// Generic sys_enter handler that dispatches to specific handlers
static void ksu_sys_enter_handler(void *data, struct pt_regs *regs, long id)
{
if (unlikely(check_syscall_fastpath(id))) {
#ifndef CONFIG_KSU_SUSFS
#ifdef KSU_TP_HOOK
if (ksu_su_compat_enabled) {
// Handle newfstatat
if (id == __NR_newfstatat) {
int *dfd = (int *)&PT_REGS_PARM1(regs);
int *flags = (int *)&PT_REGS_SYSCALL_PARM4(regs);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) && defined(CONFIG_KSU_SUSFS)
// Kernel 6.1+ with SUSFS uses struct filename **
struct filename **filename_ptr = (struct filename **)&PT_REGS_PARM2(regs);
ksu_handle_stat(dfd, filename_ptr, flags);
#else
// Older kernel or no SUSFS: use const char __user **
const char __user **filename_user = (const char __user **)&PT_REGS_PARM2(regs);
ksu_handle_stat(dfd, filename_user, flags);
#endif
return;
}
// Handle faccessat
if (id == __NR_faccessat) {
int *dfd = (int *)&PT_REGS_PARM1(regs);
const char __user **filename_user =
(const char __user **)&PT_REGS_PARM2(regs);
int *mode = (int *)&PT_REGS_PARM3(regs);
ksu_handle_faccessat(dfd, filename_user, mode, NULL);
return;
}
// Handle execve
if (id == __NR_execve) {
const char __user **filename_user =
(const char __user **)&PT_REGS_PARM1(regs);
if (current->pid != 1 && is_init(get_current_cred())) {
ksu_handle_init_mark_tracker(filename_user);
} else {
ksu_handle_execve_sucompat(filename_user, NULL, NULL, NULL);
}
return;
}
}
#endif
// Handle setresuid
if (id == __NR_setresuid) {
uid_t ruid = (uid_t)PT_REGS_PARM1(regs);
uid_t euid = (uid_t)PT_REGS_PARM2(regs);
uid_t suid = (uid_t)PT_REGS_PARM3(regs);
ksu_handle_setresuid(ruid, euid, suid);
return;
}
#endif
}
}
#endif
void ksu_syscall_hook_manager_init(void)
{
#if defined(CONFIG_KPROBES) && !defined(CONFIG_KSU_SUSFS)
int ret;
pr_info("hook_manager: ksu_hook_manager_init called\n");
#ifdef CONFIG_KRETPROBES
// Register kretprobe for syscall_regfunc
syscall_regfunc_rp = init_kretprobe("syscall_regfunc", syscall_regfunc_handler);
// Register kretprobe for syscall_unregfunc
syscall_unregfunc_rp = init_kretprobe("syscall_unregfunc", syscall_unregfunc_handler);
#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
ret = register_trace_sys_enter(ksu_sys_enter_handler, NULL);
#ifndef CONFIG_KRETPROBES
ksu_mark_running_process_locked();
#endif
if (ret) {
pr_err("hook_manager: failed to register sys_enter tracepoint: %d\n", ret);
} else {
pr_info("hook_manager: sys_enter tracepoint registered\n");
}
#endif
#endif
ksu_setuid_hook_init();
ksu_sucompat_init();
}
void ksu_syscall_hook_manager_exit(void)
{
#if defined(CONFIG_KPROBES) && !defined(CONFIG_KSU_SUSFS)
pr_info("hook_manager: ksu_hook_manager_exit called\n");
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
unregister_trace_sys_enter(ksu_sys_enter_handler, NULL);
tracepoint_synchronize_unregister();
pr_info("hook_manager: sys_enter tracepoint unregistered\n");
#endif
#ifdef CONFIG_KRETPROBES
destroy_kretprobe(&syscall_regfunc_rp);
destroy_kretprobe(&syscall_unregfunc_rp);
#endif
#endif
ksu_sucompat_exit();
ksu_setuid_hook_exit();
}

View File

@@ -1,47 +0,0 @@
#ifndef __KSU_H_HOOK_MANAGER
#define __KSU_H_HOOK_MANAGER
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/tty.h>
#include <linux/fs.h>
#include "selinux/selinux.h"
// Hook manager initialization and cleanup
void ksu_syscall_hook_manager_init(void);
void ksu_syscall_hook_manager_exit(void);
// Process marking for tracepoint
void ksu_mark_all_process(void);
void ksu_unmark_all_process(void);
void ksu_mark_running_process(void);
// Per-task mark operations
int ksu_get_task_mark(pid_t pid);
int ksu_set_task_mark(pid_t pid, bool mark);
static inline void ksu_set_task_tracepoint_flag(struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
set_task_syscall_work(t, SYSCALL_TRACEPOINT);
#else
set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
#endif
}
static inline void ksu_clear_task_tracepoint_flag(struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
#else
clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
#endif
}
void ksu_clear_task_tracepoint_flag_if_needed(struct task_struct *t);
#endif

View File

@@ -1,215 +0,0 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include "ksu.h"
#include "klog.h"
#include "ksu.h"
#include "kernel_compat.h"
#include "throne_comm.h"
#define PROC_UID_SCANNER "ksu_uid_scanner"
#define UID_SCANNER_STATE_FILE "/data/adb/ksu/.uid_scanner"
static struct proc_dir_entry *proc_entry = NULL;
static struct workqueue_struct *scanner_wq = NULL;
static struct work_struct scan_work;
static struct work_struct ksu_state_save_work;
static struct work_struct ksu_state_load_work;
// Signal userspace to rescan
static bool need_rescan = false;
static void rescan_work_fn(struct work_struct *work)
{
// Signal userspace through proc interface
need_rescan = true;
pr_info("requested userspace uid rescan\n");
}
void ksu_request_userspace_scan(void)
{
if (scanner_wq) {
queue_work(scanner_wq, &scan_work);
}
}
void ksu_handle_userspace_update(void)
{
// Called when userspace notifies update complete
need_rescan = false;
pr_info("userspace uid list updated\n");
}
static void do_save_throne_state(struct work_struct *work)
{
struct file *fp;
char state_char = ksu_uid_scanner_enabled ? '1' : '0';
loff_t off = 0;
fp = ksu_filp_open_compat(UID_SCANNER_STATE_FILE, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (IS_ERR(fp)) {
pr_err("save_throne_state create file failed: %ld\n", PTR_ERR(fp));
return;
}
if (ksu_kernel_write_compat(fp, &state_char, sizeof(state_char), &off) != sizeof(state_char)) {
pr_err("save_throne_state write failed\n");
goto exit;
}
pr_info("throne state saved: %s\n", ksu_uid_scanner_enabled ? "enabled" : "disabled");
exit:
filp_close(fp, 0);
}
void do_load_throne_state(struct work_struct *work)
{
struct file *fp;
char state_char;
loff_t off = 0;
ssize_t ret;
fp = ksu_filp_open_compat(UID_SCANNER_STATE_FILE, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_info("throne state file not found, using default: disabled\n");
ksu_uid_scanner_enabled = false;
return;
}
ret = ksu_kernel_read_compat(fp, &state_char, sizeof(state_char), &off);
if (ret != sizeof(state_char)) {
pr_err("load_throne_state read err: %zd\n", ret);
ksu_uid_scanner_enabled = false;
goto exit;
}
ksu_uid_scanner_enabled = (state_char == '1');
pr_info("throne state loaded: %s\n", ksu_uid_scanner_enabled ? "enabled" : "disabled");
exit:
filp_close(fp, 0);
}
bool ksu_throne_comm_load_state(void)
{
return ksu_queue_work(&ksu_state_load_work);
}
void ksu_throne_comm_save_state(void)
{
ksu_queue_work(&ksu_state_save_work);
}
static int uid_scanner_show(struct seq_file *m, void *v)
{
if (need_rescan) {
seq_puts(m, "RESCAN\n");
} else {
seq_puts(m, "OK\n");
}
return 0;
}
static int uid_scanner_open(struct inode *inode, struct file *file)
{
return single_open(file, uid_scanner_show, NULL);
}
static ssize_t uid_scanner_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
char cmd[16];
if (count >= sizeof(cmd))
return -EINVAL;
if (copy_from_user(cmd, buffer, count))
return -EFAULT;
cmd[count] = '\0';
// Remove newline if present
if (count > 0 && cmd[count-1] == '\n')
cmd[count-1] = '\0';
if (strcmp(cmd, "UPDATED") == 0) {
ksu_handle_userspace_update();
pr_info("received userspace update notification\n");
}
return count;
}
#ifdef KSU_COMPAT_HAS_PROC_OPS
static const struct proc_ops uid_scanner_proc_ops = {
.proc_open = uid_scanner_open,
.proc_read = seq_read,
.proc_write = uid_scanner_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
#else
static const struct file_operations uid_scanner_proc_ops = {
.owner = THIS_MODULE,
.open = uid_scanner_open,
.read = seq_read,
.write = uid_scanner_write,
.llseek = seq_lseek,
.release = single_release,
};
#endif
int ksu_throne_comm_init(void)
{
// Create workqueue
scanner_wq = alloc_workqueue("ksu_scanner", WQ_UNBOUND, 1);
if (!scanner_wq) {
pr_err("failed to create scanner workqueue\n");
return -ENOMEM;
}
INIT_WORK(&scan_work, rescan_work_fn);
// Create proc entry
proc_entry = proc_create(PROC_UID_SCANNER, 0600, NULL, &uid_scanner_proc_ops);
if (!proc_entry) {
pr_err("failed to create proc entry\n");
destroy_workqueue(scanner_wq);
return -ENOMEM;
}
pr_info("throne communication initialized\n");
return 0;
}
void ksu_throne_comm_exit(void)
{
if (proc_entry) {
proc_remove(proc_entry);
proc_entry = NULL;
}
if (scanner_wq) {
destroy_workqueue(scanner_wq);
scanner_wq = NULL;
}
pr_info("throne communication cleaned up\n");
}
int ksu_uid_init(void)
{
INIT_WORK(&ksu_state_save_work, do_save_throne_state);
INIT_WORK(&ksu_state_load_work, do_load_throne_state);
return 0;
}
void ksu_uid_exit(void)
{
do_save_throne_state(NULL);
}
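For context, the proc interface implemented above is a small text protocol: reading /proc/ksu_uid_scanner (the name comes from PROC_UID_SCANNER, created at the root of procfs) returns "RESCAN" when the kernel wants userspace to refresh the UID list and "OK" otherwise, while writing "UPDATED" acknowledges the refresh. A minimal userspace poller, purely illustrative and not part of this tree:
#include <stdio.h>
#include <string.h>
static int poll_uid_scanner(void)
{
    char buf[16] = { 0 };
    FILE *fp = fopen("/proc/ksu_uid_scanner", "r+");
    if (!fp)
        return -1; /* entry absent or no permission (created with mode 0600) */
    if (fgets(buf, sizeof(buf), fp) && !strncmp(buf, "RESCAN", 6)) {
        /* ... rebuild the uid list here, then acknowledge ... */
        fseek(fp, 0, SEEK_SET);
        fputs("UPDATED\n", fp); /* handled by uid_scanner_write() above */
    }
    fclose(fp);
    return 0;
}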

View File

@@ -1,22 +0,0 @@
#ifndef __KSU_H_THRONE_COMM
#define __KSU_H_THRONE_COMM
void ksu_request_userspace_scan(void);
void ksu_handle_userspace_update(void);
int ksu_throne_comm_init(void);
void ksu_throne_comm_exit(void);
int ksu_uid_init(void);
void ksu_uid_exit(void);
bool ksu_throne_comm_load_state(void);
void ksu_throne_comm_save_state(void);
void do_load_throne_state(struct work_struct *work);
#endif

View File

@@ -5,101 +5,23 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include "allowlist.h" #include "allowlist.h"
#include "klog.h" // IWYU pragma: keep #include "klog.h" // IWYU pragma: keep
#include "ksu.h"
#include "ksud.h"
#include "manager.h" #include "manager.h"
#include "throne_tracker.h" #include "throne_tracker.h"
#include "apk_sign.h"
#include "kernel_compat.h" #include "kernel_compat.h"
#include "dynamic_manager.h" #include "dynamic_manager.h"
#include "throne_comm.h" #include "user_data_scanner.h"
#include <linux/kthread.h>
#include <linux/sched.h>
uid_t ksu_manager_uid = KSU_INVALID_UID; uid_t ksu_manager_uid = KSU_INVALID_UID;
static uid_t locked_manager_uid = KSU_INVALID_UID;
static uid_t locked_dynamic_manager_uid = KSU_INVALID_UID;
#define KSU_UID_LIST_PATH "/data/misc/user_uid/uid_list" static struct task_struct *throne_thread;
#define SYSTEM_PACKAGES_LIST_PATH "/data/system/packages.list"
struct uid_data {
struct list_head list;
u32 uid;
char package[KSU_MAX_PACKAGE_NAME];
};
// Try read /data/misc/user_uid/uid_list
static int uid_from_um_list(struct list_head *uid_list)
{
struct file *fp;
char *buf = NULL;
loff_t size, pos = 0;
ssize_t nr;
int cnt = 0;
fp = ksu_filp_open_compat(KSU_UID_LIST_PATH, O_RDONLY, 0);
if (IS_ERR(fp))
return -ENOENT;
size = fp->f_inode->i_size;
if (size <= 0) {
filp_close(fp, NULL);
return -ENODATA;
}
buf = kzalloc(size + 1, GFP_ATOMIC);
if (!buf) {
pr_err("uid_list: OOM %lld B\n", size);
filp_close(fp, NULL);
return -ENOMEM;
}
nr = ksu_kernel_read_compat(fp, buf, size, &pos);
filp_close(fp, NULL);
if (nr != size) {
pr_err("uid_list: short read %zd/%lld\n", nr, size);
kfree(buf);
return -EIO;
}
buf[size] = '\0';
for (char *line = buf, *next; line; line = next) {
next = strchr(line, '\n');
if (next) *next++ = '\0';
while (*line == ' ' || *line == '\t' || *line == '\r') ++line;
if (!*line) continue;
char *uid_str = strsep(&line, " \t");
char *pkg = line;
if (!pkg) continue;
while (*pkg == ' ' || *pkg == '\t') ++pkg;
if (!*pkg) continue;
u32 uid;
if (kstrtou32(uid_str, 10, &uid)) {
pr_warn_once("uid_list: bad uid <%s>\n", uid_str);
continue;
}
struct uid_data *d = kzalloc(sizeof(*d), GFP_ATOMIC);
if (unlikely(!d)) {
pr_err("uid_list: OOM uid=%u\n", uid);
continue;
}
d->uid = uid;
strscpy(d->package, pkg, KSU_MAX_PACKAGE_NAME);
list_add_tail(&d->list, uid_list);
++cnt;
}
kfree(buf);
pr_info("uid_list: loaded %d entries\n", cnt);
return cnt > 0 ? 0 : -ENODATA;
}
static int get_pkg_from_apk_path(char *pkg, const char *path) static int get_pkg_from_apk_path(char *pkg, const char *path)
{ {
@@ -140,67 +62,44 @@ static int get_pkg_from_apk_path(char *pkg, const char *path)
return 0; return 0;
} }
static void crown_manager(const char *apk, struct list_head *uid_data, int signature_index) static void crown_manager(const char *apk, struct list_head *uid_data,
int signature_index, struct work_buffers *work_buf)
{ {
char pkg[KSU_MAX_PACKAGE_NAME]; if (get_pkg_from_apk_path(work_buf->package_buffer, apk) < 0) {
if (get_pkg_from_apk_path(pkg, apk) < 0) {
pr_err("Failed to get package name from apk path: %s\n", apk); pr_err("Failed to get package name from apk path: %s\n", apk);
return; return;
} }
pr_info("manager pkg: %s, signature_index: %d\n", pkg, signature_index); pr_info("manager pkg: %s, signature_index: %d\n", work_buf->package_buffer, signature_index);
#ifdef KSU_MANAGER_PACKAGE #ifdef KSU_MANAGER_PACKAGE
// pkg is `/<real package>` // pkg is `/<real package>`
if (strncmp(pkg, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) { if (strncmp(work_buf->package_buffer, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) {
pr_info("manager package is inconsistent with kernel build: %s\n", pr_info("manager package is inconsistent with kernel build: %s\n",
KSU_MANAGER_PACKAGE); KSU_MANAGER_PACKAGE);
return; return;
} }
#endif #endif
struct uid_data *np; struct uid_data *np;
list_for_each_entry(np, uid_data, list) { list_for_each_entry(np, uid_data, list) {
if (strncmp(np->package, pkg, KSU_MAX_PACKAGE_NAME) == 0) { if (strncmp(np->package, work_buf->package_buffer, KSU_MAX_PACKAGE_NAME) == 0) {
bool is_dynamic = (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2); pr_info("Crowning manager: %s(uid=%d, signature_index=%d, user=%u)\n",
work_buf->package_buffer, np->uid, signature_index, np->user_id);
if (is_dynamic) { if (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2) {
if (locked_dynamic_manager_uid != KSU_INVALID_UID && locked_dynamic_manager_uid != np->uid) {
pr_info("Unlocking previous dynamic manager UID: %d\n", locked_dynamic_manager_uid);
ksu_remove_manager(locked_dynamic_manager_uid);
locked_dynamic_manager_uid = KSU_INVALID_UID;
}
} else {
if (locked_manager_uid != KSU_INVALID_UID && locked_manager_uid != np->uid) {
pr_info("Unlocking previous manager UID: %d\n", locked_manager_uid);
ksu_invalidate_manager_uid(); // unlock old one
locked_manager_uid = KSU_INVALID_UID;
}
}
pr_info("Crowning %s manager: %s (uid=%d, signature_index=%d)\n",
is_dynamic ? "dynamic" : "traditional", pkg, np->uid, signature_index);
if (is_dynamic) {
ksu_add_manager(np->uid, signature_index); ksu_add_manager(np->uid, signature_index);
locked_dynamic_manager_uid = np->uid;
// If there is no traditional manager, set it to the current UID
if (!ksu_is_manager_uid_valid()) { if (!ksu_is_manager_uid_valid()) {
ksu_set_manager_uid(np->uid); ksu_set_manager_uid(np->uid);
locked_manager_uid = np->uid;
} }
} else { } else {
ksu_set_manager_uid(np->uid); // throne new UID ksu_set_manager_uid(np->uid);
locked_manager_uid = np->uid; // store locked UID
} }
break; break;
} }
} }
} }
#define DATA_PATH_LEN 384 // 384 is enough for /data/app/<package>/base.apk
struct data_path { struct data_path {
char dirpath[DATA_PATH_LEN]; char dirpath[DATA_PATH_LEN];
int depth; int depth;
@@ -213,7 +112,7 @@ struct apk_path_hash {
struct list_head list; struct list_head list;
}; };
static struct list_head apk_path_hash_list = LIST_HEAD_INIT(apk_path_hash_list); static struct list_head apk_path_hash_list;
struct my_dir_context { struct my_dir_context {
struct dir_context ctx; struct dir_context ctx;
@@ -222,31 +121,29 @@ struct my_dir_context {
void *private_data; void *private_data;
int depth; int depth;
int *stop; int *stop;
bool found_dynamic_manager;
struct work_buffers *work_buf; // Passing the work buffer
size_t processed_count;
}; };
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name, FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, int namelen, loff_t off, u64 ino,
unsigned int d_type) unsigned int d_type)
{ {
struct my_dir_context *my_ctx = struct my_dir_context *my_ctx =
container_of(ctx, struct my_dir_context, ctx); container_of(ctx, struct my_dir_context, ctx);
char dirpath[DATA_PATH_LEN]; struct work_buffers *work_buf = my_ctx->work_buf;
if (!my_ctx) { if (!my_ctx) {
pr_err("Invalid context\n"); pr_err("Invalid context\n");
return FILLDIR_ACTOR_STOP; return FILLDIR_ACTOR_STOP;
} }
my_ctx->processed_count++;
if (my_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
cond_resched();
}
if (my_ctx->stop && *my_ctx->stop) { if (my_ctx->stop && *my_ctx->stop) {
pr_info("Stop searching\n"); pr_info("Stop searching\n");
return FILLDIR_ACTOR_STOP; return FILLDIR_ACTOR_STOP;
@@ -256,37 +153,37 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
return FILLDIR_ACTOR_CONTINUE; // Skip "." and ".." return FILLDIR_ACTOR_CONTINUE; // Skip "." and ".."
if (d_type == DT_DIR && namelen >= 8 && !strncmp(name, "vmdl", 4) && if (d_type == DT_DIR && namelen >= 8 && !strncmp(name, "vmdl", 4) &&
!strncmp(name + namelen - 4, ".tmp", 4)) { !strncmp(name + namelen - 4, ".tmp", 4)) {
pr_info("Skipping directory: %.*s\n", namelen, name); pr_info("Skipping directory: %.*s\n", namelen, name);
return FILLDIR_ACTOR_CONTINUE; // Skip staging package return FILLDIR_ACTOR_CONTINUE; // Skip staging package
} }
if (snprintf(dirpath, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir, if (snprintf(work_buf->path_buffer, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir,
namelen, name) >= DATA_PATH_LEN) { namelen, name) >= DATA_PATH_LEN) {
pr_err("Path too long: %s/%.*s\n", my_ctx->parent_dir, namelen, pr_err("Path too long: %s/%.*s\n", my_ctx->parent_dir, namelen,
name); name);
return FILLDIR_ACTOR_CONTINUE; return FILLDIR_ACTOR_CONTINUE;
} }
if (d_type == DT_DIR && my_ctx->depth > 0 && if (d_type == DT_DIR && my_ctx->depth > 0 &&
(my_ctx->stop && !*my_ctx->stop)) { (my_ctx->stop && !*my_ctx->stop)) {
struct data_path *data = kzalloc(sizeof(struct data_path), GFP_ATOMIC); struct data_path *data = kmalloc(sizeof(struct data_path), GFP_ATOMIC);
if (!data) { if (!data) {
pr_err("Failed to allocate memory for %s\n", dirpath); pr_err("Failed to allocate memory for %s\n", work_buf->path_buffer);
return FILLDIR_ACTOR_CONTINUE; return FILLDIR_ACTOR_CONTINUE;
} }
strscpy(data->dirpath, dirpath, DATA_PATH_LEN); strscpy(data->dirpath, work_buf->path_buffer, DATA_PATH_LEN);
data->depth = my_ctx->depth - 1; data->depth = my_ctx->depth - 1;
list_add_tail(&data->list, my_ctx->data_path_list); list_add_tail(&data->list, my_ctx->data_path_list);
} else { } else {
if ((namelen == 8) && (strncmp(name, "base.apk", namelen) == 0)) { if ((namelen == 8) && (strncmp(name, "base.apk", namelen) == 0)) {
struct apk_path_hash *pos, *n; struct apk_path_hash *pos, *n;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
unsigned int hash = full_name_hash(dirpath, strlen(dirpath)); unsigned int hash = full_name_hash(work_buf->path_buffer, strlen(work_buf->path_buffer));
#else #else
unsigned int hash = full_name_hash(NULL, dirpath, strlen(dirpath)); unsigned int hash = full_name_hash(NULL, work_buf->path_buffer, strlen(work_buf->path_buffer));
#endif #endif
list_for_each_entry(pos, &apk_path_hash_list, list) { list_for_each_entry(pos, &apk_path_hash_list, list) {
if (hash == pos->hash) { if (hash == pos->hash) {
@@ -297,31 +194,44 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int signature_index = -1; int signature_index = -1;
bool is_multi_manager = is_dynamic_manager_apk( bool is_multi_manager = is_dynamic_manager_apk(
dirpath, &signature_index); work_buf->path_buffer, &signature_index);
pr_info("Found new base.apk at path: %s, is_multi_manager: %d, signature_index: %d\n", pr_info("Found new base.apk at path: %s, is_multi_manager: %d, signature_index: %d\n",
dirpath, is_multi_manager, signature_index); work_buf->path_buffer, is_multi_manager, signature_index);
// Check for dynamic sign or multi-manager signatures // Check for dynamic sign or multi-manager signatures
if (is_multi_manager && (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2)) { if (is_multi_manager && (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2)) {
crown_manager(dirpath, my_ctx->private_data, signature_index); my_ctx->found_dynamic_manager = true;
} else if (is_manager_apk(dirpath)) { crown_manager(work_buf->path_buffer, my_ctx->private_data,
crown_manager(dirpath, my_ctx->private_data, 0); signature_index, work_buf);
struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
} else if (is_manager_apk(work_buf->path_buffer)) {
crown_manager(work_buf->path_buffer,
my_ctx->private_data, 0, work_buf);
if (!my_ctx->found_dynamic_manager && !ksu_is_dynamic_manager_enabled()) {
*my_ctx->stop = 1; *my_ctx->stop = 1;
} }
struct apk_path_hash *apk_data = kzalloc(sizeof(*apk_data), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
if (is_manager_apk(dirpath)) {
// Manager found, clear APK cache list // Manager found, clear APK cache list
if (!ksu_is_dynamic_manager_enabled()) {
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) { list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list); list_del(&pos->list);
kfree(pos); kfree(pos);
}
}
} else {
struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
} }
} }
} }
@@ -334,12 +244,21 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
{ {
int i, stop = 0; int i, stop = 0;
struct list_head data_path_list; struct list_head data_path_list;
struct work_buffers *work_buf = get_work_buffer();
if (!work_buf) {
pr_err("Failed to get work buffer for search_manager\n");
return;
}
INIT_LIST_HEAD(&data_path_list); INIT_LIST_HEAD(&data_path_list);
INIT_LIST_HEAD(&apk_path_hash_list);
unsigned long data_app_magic = 0; unsigned long data_app_magic = 0;
bool found_dynamic_manager = false;
// Initialize APK cache list // Initialize APK cache list
struct apk_path_hash *pos, *n; struct apk_path_hash *pos, *n;
list_for_each_entry (pos, &apk_path_hash_list, list) { list_for_each_entry(pos, &apk_path_hash_list, list) {
pos->exists = false; pos->exists = false;
} }
@@ -352,20 +271,22 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
for (i = depth; i >= 0; i--) { for (i = depth; i >= 0; i--) {
struct data_path *pos, *n; struct data_path *pos, *n;
list_for_each_entry_safe (pos, n, &data_path_list, list) { list_for_each_entry_safe(pos, n, &data_path_list, list) {
struct my_dir_context ctx = { .ctx.actor = my_actor, struct my_dir_context ctx = { .ctx.actor = my_actor,
.data_path_list = &data_path_list, .data_path_list = &data_path_list,
.parent_dir = pos->dirpath, .parent_dir = pos->dirpath,
.private_data = uid_data, .private_data = uid_data,
.depth = pos->depth, .depth = pos->depth,
.stop = &stop }; .stop = &stop,
.found_dynamic_manager = false,
.work_buf = work_buf,
.processed_count = 0 };
struct file *file; struct file *file;
if (!stop) { if (!stop) {
file = ksu_filp_open_compat(pos->dirpath, O_RDONLY | O_NOFOLLOW, 0); file = ksu_filp_open_compat(pos->dirpath, O_RDONLY | O_NOFOLLOW | O_DIRECTORY, 0);
if (IS_ERR(file)) { if (IS_ERR(file)) {
pr_err("Failed to open directory: %s, err: %ld\n", pr_err("Failed to open directory: %s, err: %ld\n", pos->dirpath, PTR_ERR(file));
pos->dirpath, PTR_ERR(file));
goto skip_iterate; goto skip_iterate;
} }
@@ -373,8 +294,7 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
if (!data_app_magic) { if (!data_app_magic) {
if (file->f_inode->i_sb->s_magic) { if (file->f_inode->i_sb->s_magic) {
data_app_magic = file->f_inode->i_sb->s_magic; data_app_magic = file->f_inode->i_sb->s_magic;
pr_info("%s: dir: %s got magic! 0x%lx\n", __func__, pr_info("%s: dir: %s got magic! 0x%lx\n", __func__, pos->dirpath, data_app_magic);
pos->dirpath, data_app_magic);
} else { } else {
filp_close(file, NULL); filp_close(file, NULL);
goto skip_iterate; goto skip_iterate;
@@ -382,25 +302,32 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
} }
if (file->f_inode->i_sb->s_magic != data_app_magic) { if (file->f_inode->i_sb->s_magic != data_app_magic) {
pr_info("%s: skip: %s magic: 0x%lx expected: 0x%lx\n", pr_info("%s: skip: %s magic: 0x%lx expected: 0x%lx\n", __func__, pos->dirpath,
__func__, pos->dirpath, file->f_inode->i_sb->s_magic, data_app_magic);
file->f_inode->i_sb->s_magic, data_app_magic);
filp_close(file, NULL); filp_close(file, NULL);
goto skip_iterate; goto skip_iterate;
} }
iterate_dir(file, &ctx.ctx); iterate_dir(file, &ctx.ctx);
filp_close(file, NULL); filp_close(file, NULL);
if (ctx.found_dynamic_manager) {
found_dynamic_manager = true;
}
cond_resched();
} }
skip_iterate: skip_iterate:
list_del(&pos->list); list_del(&pos->list);
if (pos != &data) if (pos != &data)
kfree(pos); kfree(pos);
} }
cond_resched();
} }
// Remove stale cached APK entries // Remove stale cached APK entries
list_for_each_entry_safe (pos, n, &apk_path_hash_list, list) { list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
if (!pos->exists) { if (!pos->exists) {
list_del(&pos->list); list_del(&pos->list);
kfree(pos); kfree(pos);
@@ -414,9 +341,9 @@ static bool is_uid_exist(uid_t uid, char *package, void *data)
struct uid_data *np; struct uid_data *np;
bool exist = false; bool exist = false;
list_for_each_entry (np, list, list) { list_for_each_entry(np, list, list) {
if (np->uid == uid % 100000 && if (np->uid == uid % 100000 &&
strncmp(np->package, package, KSU_MAX_PACKAGE_NAME) == 0) { strncmp(np->package, package, KSU_MAX_PACKAGE_NAME) == 0) {
exist = true; exist = true;
break; break;
} }
@@ -424,127 +351,65 @@ static bool is_uid_exist(uid_t uid, char *package, void *data)
return exist; return exist;
} }
void track_throne(bool prune_only) static void track_throne_function(void)
{ {
struct list_head uid_list; struct list_head uid_list;
struct uid_data *np, *n;
struct file *fp;
char chr = 0;
loff_t pos = 0;
loff_t line_start = 0;
char buf[KSU_MAX_PACKAGE_NAME];
static bool manager_exist = false;
static bool dynamic_manager_exist = false;
int current_manager_uid = ksu_get_manager_uid() % 100000;
// init uid list head
INIT_LIST_HEAD(&uid_list); INIT_LIST_HEAD(&uid_list);
// scan user data for uids
int ret = scan_user_data_for_uids(&uid_list, scan_all_users);
if (ksu_uid_scanner_enabled) { if (ret < 0) {
pr_info("Scanning %s directory..\n", KSU_UID_LIST_PATH); pr_err("Improved UserDE UID scan failed: %d. scan_all_users=%d\n", ret, scan_all_users);
goto out;
if (uid_from_um_list(&uid_list) == 0) {
pr_info("Loaded UIDs from %s success\n", KSU_UID_LIST_PATH);
goto uid_ready;
} }
pr_warn("%s read failed, fallback to %s\n", // now update uid list
KSU_UID_LIST_PATH, SYSTEM_PACKAGES_LIST_PATH); struct uid_data *np;
} struct uid_data *n;
{
fp = ksu_filp_open_compat(SYSTEM_PACKAGES_LIST_PATH, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("%s: open " SYSTEM_PACKAGES_LIST_PATH " failed: %ld\n", __func__, PTR_ERR(fp));
return;
}
for (;;) {
ssize_t count =
ksu_kernel_read_compat(fp, &chr, sizeof(chr), &pos);
if (count != sizeof(chr))
break;
if (chr != '\n')
continue;
count = ksu_kernel_read_compat(fp, buf, sizeof(buf),
&line_start);
struct uid_data *data =
kzalloc(sizeof(struct uid_data), GFP_ATOMIC);
if (!data) {
filp_close(fp, 0);
goto out;
}
char *tmp = buf;
const char *delim = " ";
char *package = strsep(&tmp, delim);
char *uid = strsep(&tmp, delim);
if (!uid || !package) {
pr_err("update_uid: package or uid is NULL!\n");
break;
}
u32 res;
if (kstrtou32(uid, 10, &res)) {
pr_err("update_uid: uid parse err\n");
break;
}
data->uid = res;
strncpy(data->package, package, KSU_MAX_PACKAGE_NAME);
list_add_tail(&data->list, &uid_list);
// reset line start
line_start = pos;
}
filp_close(fp, 0);
}
uid_ready:
if (prune_only)
goto prune;
// first, check if manager_uid exist! // first, check if manager_uid exist!
bool manager_exist = false;
bool dynamic_manager_exist = false;
list_for_each_entry(np, &uid_list, list) { list_for_each_entry(np, &uid_list, list) {
if (np->uid == current_manager_uid) { // if manager is installed in work profile, the uid in packages.list is still equals main profile
// don't delete it in this case!
int manager_uid = ksu_get_manager_uid() % 100000;
if (np->uid == manager_uid) {
manager_exist = true; manager_exist = true;
break; break;
} }
} }
if (!manager_exist && locked_manager_uid != KSU_INVALID_UID) { // Check for dynamic managers
pr_info("Manager APK removed, unlock previous UID: %d\n", if (ksu_is_dynamic_manager_enabled()) {
locked_manager_uid); dynamic_manager_exist = ksu_has_dynamic_managers();
ksu_invalidate_manager_uid();
locked_manager_uid = KSU_INVALID_UID;
}
// Check if the Dynamic Manager exists (only check locked UIDs)
if (ksu_is_dynamic_manager_enabled() &&
locked_dynamic_manager_uid != KSU_INVALID_UID) {
list_for_each_entry(np, &uid_list, list) {
if (np->uid == locked_dynamic_manager_uid) {
dynamic_manager_exist = true;
break;
}
}
if (!dynamic_manager_exist) { if (!dynamic_manager_exist) {
pr_info("Dynamic manager APK removed, unlock previous UID: %d\n", list_for_each_entry(np, &uid_list, list) {
locked_dynamic_manager_uid); // Check if this uid is a dynamic manager (not the traditional manager)
ksu_remove_manager(locked_dynamic_manager_uid); if (ksu_is_any_manager(np->uid) && np->uid != ksu_get_manager_uid()) {
locked_dynamic_manager_uid = KSU_INVALID_UID; dynamic_manager_exist = true;
break;
}
}
} }
} }
bool need_search = !manager_exist; if (!manager_exist) {
if (ksu_is_dynamic_manager_enabled() && !dynamic_manager_exist) if (ksu_is_manager_uid_valid()) {
need_search = true; pr_info("manager is uninstalled, invalidate it!\n");
ksu_invalidate_manager_uid();
if (need_search) { goto prune;
pr_info("Searching for manager(s)...\n"); }
pr_info("Searching manager...\n");
search_manager("/data/app", 2, &uid_list); search_manager("/data/app", 2, &uid_list);
pr_info("Manager search finished\n"); pr_info("Search manager finished\n");
// Always perform search when called from dynamic manager rescan
} else if (!dynamic_manager_exist && ksu_is_dynamic_manager_enabled()) {
pr_info("Dynamic sign enabled, Searching manager...\n");
search_manager("/data/app", 2, &uid_list);
pr_info("Search Dynamic sign manager finished\n");
} }
prune: prune:
@@ -558,6 +423,33 @@ out:
} }
} }
static int throne_tracker_thread(void *data)
{
pr_info("%s: pid: %d started\n", __func__, current->pid);
// for the kthread, we need to escape to root
// since it does not inherit the caller's context.
// this runs as root but without the capabilities, so call it with false
escape_to_root(false);
track_throne_function();
throne_thread = NULL;
smp_mb();
pr_info("%s: pid: %d exit!\n", __func__, current->pid);
return 0;
}
void track_throne(void)
{
smp_mb();
if (throne_thread != NULL) // single instance lock
return;
throne_thread = kthread_run(throne_tracker_thread, NULL, "throne_tracker");
if (IS_ERR(throne_thread)) {
throne_thread = NULL;
return;
}
}
void ksu_throne_tracker_init(void) void ksu_throne_tracker_init(void)
{ {
// nothing to do // nothing to do

View File

@@ -5,6 +5,6 @@ void ksu_throne_tracker_init(void);
 void ksu_throne_tracker_exit(void);
-void track_throne(bool prune_only);
+void track_throne(void);
 #endif

View File

@@ -0,0 +1,412 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include "allowlist.h"
#include "klog.h" // IWYU pragma: keep
#include "ksu.h"
#include "manager.h"
#include "throne_tracker.h"
#include "kernel_compat.h"
// legacy throne tracker
// this is kept for UL purposes
// reason: can't solve deadlock on user_data_actor()
// xx - 20200914
uid_t ksu_manager_uid = KSU_INVALID_UID;
#define SYSTEM_PACKAGES_LIST_PATH "/data/system/packages.list.tmp"
struct uid_data {
struct list_head list;
u32 uid;
char package[KSU_MAX_PACKAGE_NAME];
};
static int get_pkg_from_apk_path(char *pkg, const char *path)
{
int len = strlen(path);
if (len >= KSU_MAX_PACKAGE_NAME || len < 1)
return -1;
const char *last_slash = NULL;
const char *second_last_slash = NULL;
int i;
for (i = len - 1; i >= 0; i--) {
if (path[i] == '/') {
if (!last_slash) {
last_slash = &path[i];
} else {
second_last_slash = &path[i];
break;
}
}
}
if (!last_slash || !second_last_slash)
return -1;
const char *last_hyphen = strchr(second_last_slash, '-');
if (!last_hyphen || last_hyphen > last_slash)
return -1;
int pkg_len = last_hyphen - second_last_slash - 1;
if (pkg_len >= KSU_MAX_PACKAGE_NAME || pkg_len <= 0)
return -1;
// Copying the package name
strncpy(pkg, second_last_slash + 1, pkg_len);
pkg[pkg_len] = '\0';
return 0;
}
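// Illustration (hypothetical path, not from this patch): for
// "/data/app/com.example.app-AbCd1234/base.apk", last_slash points at the '/'
// before "base.apk", second_last_slash at the '/' before the package directory,
// and the first '-' after it ends the name, so pkg becomes "com.example.app".
//
//   char pkg[KSU_MAX_PACKAGE_NAME];
//   if (!get_pkg_from_apk_path(pkg, "/data/app/com.example.app-AbCd1234/base.apk"))
//       pr_info("pkg=%s\n", pkg); /* -> "com.example.app" */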
static void crown_manager(const char *apk, struct list_head *uid_data)
{
char pkg[KSU_MAX_PACKAGE_NAME];
if (get_pkg_from_apk_path(pkg, apk) < 0) {
pr_err("Failed to get package name from apk path: %s\n", apk);
return;
}
pr_info("manager pkg: %s\n", pkg);
#ifdef KSU_MANAGER_PACKAGE
// pkg is `/<real package>`
if (strncmp(pkg, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) {
pr_info("manager package is inconsistent with kernel build: %s\n",
KSU_MANAGER_PACKAGE);
return;
}
#endif
struct list_head *list = (struct list_head *)uid_data;
struct uid_data *np;
list_for_each_entry (np, list, list) {
if (strncmp(np->package, pkg, KSU_MAX_PACKAGE_NAME) == 0) {
pr_info("Crowning manager: %s(uid=%d)\n", pkg, np->uid);
ksu_set_manager_uid(np->uid);
break;
}
}
}
#define DATA_PATH_LEN 384 // 384 is enough for /data/app/<package>/base.apk
struct data_path {
char dirpath[DATA_PATH_LEN];
int depth;
struct list_head list;
};
struct apk_path_hash {
unsigned int hash;
bool exists;
struct list_head list;
};
static struct list_head apk_path_hash_list = LIST_HEAD_INIT(apk_path_hash_list);
struct my_dir_context {
struct dir_context ctx;
struct list_head *data_path_list;
char *parent_dir;
void *private_data;
int depth;
int *stop;
};
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino,
unsigned int d_type)
{
struct my_dir_context *my_ctx =
container_of(ctx, struct my_dir_context, ctx);
char dirpath[DATA_PATH_LEN];
if (!my_ctx) {
pr_err("Invalid context\n");
return FILLDIR_ACTOR_STOP;
}
if (my_ctx->stop && *my_ctx->stop) {
pr_info("Stop searching\n");
return FILLDIR_ACTOR_STOP;
}
if (!strncmp(name, "..", namelen) || !strncmp(name, ".", namelen))
return FILLDIR_ACTOR_CONTINUE; // Skip "." and ".."
if (d_type == DT_DIR && namelen >= 8 && !strncmp(name, "vmdl", 4) &&
!strncmp(name + namelen - 4, ".tmp", 4)) {
pr_info("Skipping directory: %.*s\n", namelen, name);
return FILLDIR_ACTOR_CONTINUE; // Skip staging package
}
if (snprintf(dirpath, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir,
namelen, name) >= DATA_PATH_LEN) {
pr_err("Path too long: %s/%.*s\n", my_ctx->parent_dir, namelen,
name);
return FILLDIR_ACTOR_CONTINUE;
}
if (d_type == DT_DIR && my_ctx->depth > 0 &&
(my_ctx->stop && !*my_ctx->stop)) {
struct data_path *data = kmalloc(sizeof(struct data_path), GFP_ATOMIC);
if (!data) {
pr_err("Failed to allocate memory for %s\n", dirpath);
return FILLDIR_ACTOR_CONTINUE;
}
strscpy(data->dirpath, dirpath, DATA_PATH_LEN);
data->depth = my_ctx->depth - 1;
list_add_tail(&data->list, my_ctx->data_path_list);
} else {
if ((namelen == 8) && (strncmp(name, "base.apk", namelen) == 0)) {
struct apk_path_hash *pos, *n;
unsigned int hash = full_name_hash(NULL, dirpath, strlen(dirpath));
list_for_each_entry(pos, &apk_path_hash_list, list) {
if (hash == pos->hash) {
pos->exists = true;
return FILLDIR_ACTOR_CONTINUE;
}
}
bool is_manager = is_manager_apk(dirpath);
pr_info("Found new base.apk at path: %s, is_manager: %d\n",
dirpath, is_manager);
if (is_manager) {
crown_manager(dirpath, my_ctx->private_data);
*my_ctx->stop = 1;
// Manager found, clear APK cache list
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list);
kfree(pos);
}
} else {
struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
}
}
}
return FILLDIR_ACTOR_CONTINUE;
}
void search_manager(const char *path, int depth, struct list_head *uid_data)
{
int i, stop = 0;
struct list_head data_path_list;
INIT_LIST_HEAD(&data_path_list);
unsigned long data_app_magic = 0;
// Initialize APK cache list
struct apk_path_hash *pos, *n;
list_for_each_entry(pos, &apk_path_hash_list, list) {
pos->exists = false;
}
// First depth
struct data_path data;
strscpy(data.dirpath, path, DATA_PATH_LEN);
data.depth = depth;
list_add_tail(&data.list, &data_path_list);
for (i = depth; i >= 0; i--) {
struct data_path *pos, *n;
list_for_each_entry_safe(pos, n, &data_path_list, list) {
struct my_dir_context ctx = { .ctx.actor = my_actor,
.data_path_list = &data_path_list,
.parent_dir = pos->dirpath,
.private_data = uid_data,
.depth = pos->depth,
.stop = &stop };
struct file *file;
if (!stop) {
file = ksu_filp_open_compat(pos->dirpath, O_RDONLY | O_NOFOLLOW, 0);
if (IS_ERR(file)) {
pr_err("Failed to open directory: %s, err: %ld\n", pos->dirpath, PTR_ERR(file));
goto skip_iterate;
}
// grab magic on first folder, which is /data/app
if (!data_app_magic) {
if (file->f_inode->i_sb->s_magic) {
data_app_magic = file->f_inode->i_sb->s_magic;
pr_info("%s: dir: %s got magic! 0x%lx\n", __func__, pos->dirpath, data_app_magic);
} else {
filp_close(file, NULL);
goto skip_iterate;
}
}
if (file->f_inode->i_sb->s_magic != data_app_magic) {
pr_info("%s: skip: %s magic: 0x%lx expected: 0x%lx\n", __func__, pos->dirpath,
file->f_inode->i_sb->s_magic, data_app_magic);
filp_close(file, NULL);
goto skip_iterate;
}
iterate_dir(file, &ctx.ctx);
filp_close(file, NULL);
}
skip_iterate:
list_del(&pos->list);
if (pos != &data)
kfree(pos);
}
}
// Remove stale cached APK entries
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
if (!pos->exists) {
list_del(&pos->list);
kfree(pos);
}
}
}
static bool is_uid_exist(uid_t uid, char *package, void *data)
{
struct list_head *list = (struct list_head *)data;
struct uid_data *np;
bool exist = false;
list_for_each_entry (np, list, list) {
if (np->uid == uid % 100000 &&
strncmp(np->package, package, KSU_MAX_PACKAGE_NAME) == 0) {
exist = true;
break;
}
}
return exist;
}
void track_throne()
{
struct file *fp =
ksu_filp_open_compat(SYSTEM_PACKAGES_LIST_PATH, O_RDONLY, 0);
if (IS_ERR(fp)) {
pr_err("%s: open " SYSTEM_PACKAGES_LIST_PATH " failed: %ld\n",
__func__, PTR_ERR(fp));
return;
}
struct list_head uid_list;
INIT_LIST_HEAD(&uid_list);
char chr = 0;
loff_t pos = 0;
loff_t line_start = 0;
char buf[KSU_MAX_PACKAGE_NAME];
for (;;) {
ssize_t count =
ksu_kernel_read_compat(fp, &chr, sizeof(chr), &pos);
if (count != sizeof(chr))
break;
if (chr != '\n')
continue;
count = ksu_kernel_read_compat(fp, buf, sizeof(buf),
&line_start);
struct uid_data *data =
kzalloc(sizeof(struct uid_data), GFP_ATOMIC);
if (!data) {
filp_close(fp, 0);
goto out;
}
char *tmp = buf;
const char *delim = " ";
char *package = strsep(&tmp, delim);
char *uid = strsep(&tmp, delim);
if (!uid || !package) {
pr_err("update_uid: package or uid is NULL!\n");
break;
}
u32 res;
if (kstrtou32(uid, 10, &res)) {
pr_err("update_uid: uid parse err\n");
break;
}
data->uid = res;
strncpy(data->package, package, KSU_MAX_PACKAGE_NAME);
list_add_tail(&data->list, &uid_list);
// reset line start
line_start = pos;
}
filp_close(fp, 0);
// now update uid list
struct uid_data *np;
struct uid_data *n;
// first, check if manager_uid exist!
bool manager_exist = false;
list_for_each_entry (np, &uid_list, list) {
// if manager is installed in work profile, the uid in packages.list is still equals main profile
// don't delete it in this case!
int manager_uid = ksu_get_manager_uid() % 100000;
if (np->uid == manager_uid) {
manager_exist = true;
break;
}
}
if (!manager_exist) {
if (ksu_is_manager_uid_valid()) {
pr_info("manager is uninstalled, invalidate it!\n");
ksu_invalidate_manager_uid();
goto prune;
}
pr_info("Searching manager...\n");
search_manager("/data/app", 2, &uid_list);
pr_info("Search manager finished\n");
}
prune:
// then prune the allowlist
ksu_prune_allowlist(is_uid_exist, &uid_list);
out:
// free uid_list
list_for_each_entry_safe (np, n, &uid_list, list) {
list_del(&np->list);
kfree(np);
}
}
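// For reference, the parsing loop in track_throne() above only consumes the first
// two whitespace-separated fields of each packages.list line, i.e. "<package> <uid>";
// any further fields on the line are ignored. An illustrative (made-up) line:
//
//   com.example.app 10123 0 /data/user/0/com.example.app ...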
void ksu_throne_tracker_init()
{
// nothing to do
}
void ksu_throne_tracker_exit()
{
// nothing to do
}

View File

@@ -1,354 +0,0 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/mount.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include "klog.h"
#include "kernel_umount.h"
#include "umount_manager.h"
static struct umount_manager g_umount_mgr = {
.entry_count = 0,
.max_entries = 512,
};
static void try_umount_path(struct umount_entry *entry)
{
try_umount(entry->path, entry->flags);
}
static struct umount_entry *find_entry_locked(const char *path)
{
struct umount_entry *entry;
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (strcmp(entry->path, path) == 0) {
return entry;
}
}
return NULL;
}
static bool is_path_in_mount_list(const char *path)
{
struct mount_entry *entry;
bool found = false;
down_read(&mount_list_lock);
list_for_each_entry(entry, &mount_list, list) {
if (entry->umountable && strcmp(entry->umountable, path) == 0) {
found = true;
break;
}
}
up_read(&mount_list_lock);
return found;
}
static int copy_mount_entry_to_user(struct ksu_umount_entry_info __user *entries,
u32 idx, const char *path, int flags)
{
struct ksu_umount_entry_info info;
memset(&info, 0, sizeof(info));
strncpy(info.path, path, sizeof(info.path) - 1);
info.path[sizeof(info.path) - 1] = '\0';
info.flags = flags;
info.is_default = 1;
info.state = UMOUNT_STATE_IDLE;
info.ref_count = 0;
if (copy_to_user(&entries[idx], &info, sizeof(info))) {
return -EFAULT;
}
return 0;
}
static int copy_umount_entry_to_user(struct ksu_umount_entry_info __user *entries,
u32 idx, struct umount_entry *entry)
{
struct ksu_umount_entry_info info;
memset(&info, 0, sizeof(info));
strncpy(info.path, entry->path, sizeof(info.path) - 1);
info.path[sizeof(info.path) - 1] = '\0';
info.flags = entry->flags;
info.is_default = entry->is_default;
info.state = entry->state;
info.ref_count = entry->ref_count;
if (copy_to_user(&entries[idx], &info, sizeof(info))) {
return -EFAULT;
}
return 0;
}
static int collect_mount_list_entries(struct ksu_umount_entry_info __user *entries,
u32 max_count, u32 *out_idx)
{
struct mount_entry *mount_entry;
u32 idx = 0;
down_read(&mount_list_lock);
list_for_each_entry(mount_entry, &mount_list, list) {
if (idx >= max_count) {
break;
}
if (!mount_entry->umountable) {
continue;
}
if (copy_mount_entry_to_user(entries, idx, mount_entry->umountable,
mount_entry->flags)) {
up_read(&mount_list_lock);
return -EFAULT;
}
idx++;
}
up_read(&mount_list_lock);
*out_idx = idx;
return 0;
}
static int collect_umount_manager_entries(struct ksu_umount_entry_info __user *entries,
u32 start_idx, u32 max_count, u32 *out_idx)
{
struct umount_entry *entry;
unsigned long flags;
u32 idx = start_idx;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (idx >= max_count) {
break;
}
if (is_path_in_mount_list(entry->path)) {
continue;
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
if (copy_umount_entry_to_user(entries, idx, entry)) {
return -EFAULT;
}
idx++;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
*out_idx = idx;
return 0;
}
int ksu_umount_manager_init(void)
{
INIT_LIST_HEAD(&g_umount_mgr.entry_list);
spin_lock_init(&g_umount_mgr.lock);
return 0;
}
void ksu_umount_manager_exit(void)
{
struct umount_entry *entry, *tmp;
unsigned long flags;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry_safe(entry, tmp, &g_umount_mgr.entry_list, list) {
list_del(&entry->list);
kfree(entry);
g_umount_mgr.entry_count--;
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
pr_info("Umount manager cleaned up\n");
}
int ksu_umount_manager_add(const char *path, int flags, bool is_default)
{
struct umount_entry *entry;
unsigned long irqflags;
int ret = 0;
if (flags == -1)
flags = MNT_DETACH;
if (!path || strlen(path) == 0 || strlen(path) >= 256) {
return -EINVAL;
}
if (is_path_in_mount_list(path)) {
pr_warn("Umount manager: path already exists in mount_list: %s\n", path);
return -EEXIST;
}
spin_lock_irqsave(&g_umount_mgr.lock, irqflags);
if (g_umount_mgr.entry_count >= g_umount_mgr.max_entries) {
pr_err("Umount manager: max entries reached\n");
ret = -ENOMEM;
goto out;
}
if (find_entry_locked(path)) {
pr_warn("Umount manager: path already exists: %s\n", path);
ret = -EEXIST;
goto out;
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
ret = -ENOMEM;
goto out;
}
strncpy(entry->path, path, sizeof(entry->path) - 1);
entry->flags = flags;
entry->state = UMOUNT_STATE_IDLE;
entry->is_default = is_default;
entry->ref_count = 0;
list_add_tail(&entry->list, &g_umount_mgr.entry_list);
g_umount_mgr.entry_count++;
pr_info("Umount manager: added %s entry: %s\n",
is_default ? "default" : "custom", path);
out:
spin_unlock_irqrestore(&g_umount_mgr.lock, irqflags);
return ret;
}
int ksu_umount_manager_remove(const char *path)
{
struct umount_entry *entry;
unsigned long flags;
int ret = 0;
if (!path) {
return -EINVAL;
}
spin_lock_irqsave(&g_umount_mgr.lock, flags);
entry = find_entry_locked(path);
if (!entry) {
ret = -ENOENT;
goto out;
}
if (entry->is_default) {
pr_err("Umount manager: cannot remove default entry: %s\n", path);
ret = -EPERM;
goto out;
}
if (entry->state == UMOUNT_STATE_BUSY || entry->ref_count > 0) {
pr_err("Umount manager: entry is busy: %s\n", path);
ret = -EBUSY;
goto out;
}
list_del(&entry->list);
g_umount_mgr.entry_count--;
kfree(entry);
pr_info("Umount manager: removed entry: %s\n", path);
out:
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
return ret;
}
void ksu_umount_manager_execute_all(const struct cred *cred)
{
struct umount_entry *entry;
unsigned long flags;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->state == UMOUNT_STATE_IDLE) {
entry->ref_count++;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->ref_count > 0 && entry->state == UMOUNT_STATE_IDLE) {
try_umount_path(entry);
}
}
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry(entry, &g_umount_mgr.entry_list, list) {
if (entry->ref_count > 0) {
entry->ref_count--;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
}
int ksu_umount_manager_get_entries(struct ksu_umount_entry_info __user *entries, u32 *count)
{
u32 max_count = *count;
u32 idx;
int ret;
ret = collect_mount_list_entries(entries, max_count, &idx);
if (ret) {
return ret;
}
if (idx < max_count) {
ret = collect_umount_manager_entries(entries, idx, max_count, &idx);
if (ret) {
return ret;
}
}
*count = idx;
return 0;
}
int ksu_umount_manager_clear_custom(void)
{
struct umount_entry *entry, *tmp;
unsigned long flags;
u32 cleared = 0;
spin_lock_irqsave(&g_umount_mgr.lock, flags);
list_for_each_entry_safe(entry, tmp, &g_umount_mgr.entry_list, list) {
if (!entry->is_default && entry->state == UMOUNT_STATE_IDLE && entry->ref_count == 0) {
list_del(&entry->list);
kfree(entry);
g_umount_mgr.entry_count--;
cleared++;
}
}
spin_unlock_irqrestore(&g_umount_mgr.lock, flags);
pr_info("Umount manager: cleared %u custom entries\n", cleared);
return 0;
}

View File

@@ -1,63 +0,0 @@
#ifndef __KSU_H_UMOUNT_MANAGER
#define __KSU_H_UMOUNT_MANAGER
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct cred;
enum umount_entry_state {
UMOUNT_STATE_IDLE = 0,
UMOUNT_STATE_ACTIVE = 1,
UMOUNT_STATE_BUSY = 2,
};
struct umount_entry {
struct list_head list;
char path[256];
int flags;
enum umount_entry_state state;
bool is_default;
u32 ref_count;
};
struct umount_manager {
struct list_head entry_list;
spinlock_t lock;
u32 entry_count;
u32 max_entries;
};
enum umount_manager_op {
UMOUNT_OP_ADD = 0,
UMOUNT_OP_REMOVE = 1,
UMOUNT_OP_LIST = 2,
UMOUNT_OP_CLEAR_CUSTOM = 3,
};
struct ksu_umount_manager_cmd {
__u32 operation;
char path[256];
__s32 flags;
__u32 count;
__aligned_u64 entries_ptr;
};
struct ksu_umount_entry_info {
char path[256];
__s32 flags;
__u8 is_default;
__u32 state;
__u32 ref_count;
};
int ksu_umount_manager_init(void);
void ksu_umount_manager_exit(void);
int ksu_umount_manager_add(const char *path, int flags, bool is_default);
int ksu_umount_manager_remove(const char *path);
void ksu_umount_manager_execute_all(const struct cred *cred);
int ksu_umount_manager_get_entries(struct ksu_umount_entry_info __user *entries, u32 *count);
int ksu_umount_manager_clear_custom(void);
#endif // __KSU_H_UMOUNT_MANAGER
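A rough sketch of how a userspace client might fill the command structure above to add a custom umount target. This is illustrative only: the mount point below is hypothetical, and the mechanism that actually hands the struct to the kernel (the prctl/ioctl plumbing) is outside this header and not shown here.
    struct ksu_umount_manager_cmd cmd = { 0 };
    cmd.operation = UMOUNT_OP_ADD;
    strncpy(cmd.path, "/debug_ramdisk", sizeof(cmd.path) - 1); /* hypothetical path */
    cmd.flags = -1; /* ksu_umount_manager_add() treats -1 as "default to MNT_DETACH" */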

kernel/user_data_scanner.c — new file, 640 lines
View File

@@ -0,0 +1,640 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/jiffies.h>
#include <linux/delay.h> // for usleep_range()/msleep() used below
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include "klog.h"
#include "ksu.h"
#include "kernel_compat.h"
#include "user_data_scanner.h"
#define KERN_PATH_TIMEOUT_MS 100
#define MAX_FUSE_CHECK_RETRIES 3
// Magic numbers: filesystem superblock identifiers
#define FUSE_SUPER_MAGIC 0x65735546 // FUSE (Userspace filesystem)
#define OVERLAYFS_SUPER_MAGIC 0x794c7630 // OverlayFS
#define TMPFS_MAGIC 0x01021994 // tmpfs
#define F2FS_SUPER_MAGIC 0xF2F52010 // F2FS (Flash-Friendly File System)
#define EXT4_SUPER_MAGIC 0xEF53 // ext4
extern bool is_lock_held(const char *path);
static struct workqueue_struct *scan_workqueue;
struct work_buffers *get_work_buffer(void)
{
static struct work_buffers global_buffer;
return &global_buffer;
}
// Check the file system type
static bool is_dangerous_fs_magic(unsigned long magic)
{
switch (magic) {
case FUSE_SUPER_MAGIC:
case OVERLAYFS_SUPER_MAGIC:
case TMPFS_MAGIC:
case F2FS_SUPER_MAGIC:
case EXT4_SUPER_MAGIC:
return true;
default:
return false;
}
}
// Check whether the file system is an encrypted user data file system
static bool is_encrypted_userdata_fs(struct super_block *sb, const char *path)
{
if (!sb || !path)
return true;
if (strstr(path, "/data/user_de") || strstr(path, "/data/user")) {
return true;
}
if (is_dangerous_fs_magic(sb->s_magic)) {
return true;
}
return false;
}
static bool is_path_for_kern_path(const char *path, struct super_block *expected_sb)
{
if (fatal_signal_pending(current)) {
pr_warn("Fatal signal pending, skip path: %s\n", path);
return false;
}
if (need_resched()) {
cond_resched();
if (fatal_signal_pending(current))
return false;
}
if (in_interrupt() || in_atomic()) {
pr_warn("Cannot scan path in atomic context: %s\n", path);
return false;
}
if (!path || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
return false;
}
if (strstr(path, ".tmp") || strstr(path, ".removing") ||
strstr(path, ".unmounting") || strstr(path, ".pending")) {
pr_debug("Path appears to be in transition state: %s\n", path);
return false;
}
if (expected_sb) {
if (is_dangerous_fs_magic(expected_sb->s_magic)) {
pr_info("Skipping dangerous filesystem (magic=0x%lx): %s\n",
expected_sb->s_magic, path);
return false;
}
if (is_encrypted_userdata_fs(expected_sb, path)) {
pr_warn("Skipping potentially encrypted userdata filesystem: %s\n", path);
return false;
}
}
return true;
}
static int kern_path_with_timeout(const char *path, unsigned int flags,
struct path *result)
{
unsigned long start_time = jiffies;
unsigned long timeout = start_time + msecs_to_jiffies(KERN_PATH_TIMEOUT_MS);
int retries = 0;
int err;
if (!is_path_for_kern_path(path, NULL)) {
return -EPERM;
}
do {
if (time_after(jiffies, timeout)) {
pr_warn("kern_path timeout for: %s\n", path);
return -ETIMEDOUT;
}
if (fatal_signal_pending(current)) {
pr_warn("Fatal signal during kern_path: %s\n", path);
return -EINTR;
}
if (in_atomic() || irqs_disabled()) {
pr_warn("Cannot call kern_path in atomic context: %s\n", path);
return -EINVAL;
}
err = kern_path(path, flags, result);
if (err == 0) {
if (!is_path_for_kern_path(path, result->mnt->mnt_sb)) {
path_put(result);
return -EPERM;
}
return 0;
}
if (err == -ENOENT || err == -ENOTDIR || err == -EACCES || err == -EPERM) {
return err;
}
if (err == -EBUSY || err == -EAGAIN) {
retries++;
if (retries >= MAX_FUSE_CHECK_RETRIES) {
pr_warn("Max retries reached for: %s (err=%d)\n", path, err);
return err;
}
usleep_range(1000, 2000);
continue;
}
return err;
} while (retries < MAX_FUSE_CHECK_RETRIES);
return err;
}
FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type)
{
struct user_dir_ctx *uctx = container_of(ctx, struct user_dir_ctx, ctx);
struct user_scan_ctx *scan_ctx = uctx->scan_ctx;
if (!scan_ctx || !scan_ctx->deferred_paths)
return FILLDIR_ACTOR_STOP;
scan_ctx->processed_count++;
if (scan_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
cond_resched();
if (fatal_signal_pending(current)) {
pr_info("Fatal signal received, stopping scan\n");
return FILLDIR_ACTOR_STOP;
}
}
if (d_type != DT_DIR || namelen <= 0)
return FILLDIR_ACTOR_CONTINUE;
if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
return FILLDIR_ACTOR_CONTINUE;
if (namelen >= KSU_MAX_PACKAGE_NAME) {
pr_warn("Package name too long: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
struct deferred_path_info *path_info = kzalloc(sizeof(struct deferred_path_info), GFP_KERNEL);
if (!path_info) {
pr_err("Memory allocation failed for path info: %.*s\n", namelen, name);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
int path_len = snprintf(path_info->path, sizeof(path_info->path),
"%s/%u/%.*s", USER_DATA_BASE_PATH, scan_ctx->user_id, namelen, name);
if (path_len >= sizeof(path_info->path)) {
pr_err("Path too long for: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
kfree(path_info);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
path_info->user_id = scan_ctx->user_id;
size_t copy_len = min_t(size_t, namelen, KSU_MAX_PACKAGE_NAME - 1);
strncpy(path_info->package_name, name, copy_len);
path_info->package_name[copy_len] = '\0';
list_add_tail(&path_info->list, scan_ctx->deferred_paths);
scan_ctx->pkg_count++;
return FILLDIR_ACTOR_CONTINUE;
}
static int process_deferred_paths(struct list_head *deferred_paths, struct list_head *uid_list)
{
	struct deferred_path_info *path_info, *n;
	int success_count = 0;
	int skip_count = 0;
	list_for_each_entry_safe(path_info, n, deferred_paths, list) {
		if (!is_path_for_kern_path(path_info->path, NULL)) {
			pr_debug("Skipping unsafe path: %s\n", path_info->path);
			skip_count++;
			list_del(&path_info->list);
			kfree(path_info);
			continue;
		}
		// Retrieve path information
		struct path path;
		int err = kern_path_with_timeout(path_info->path, LOOKUP_FOLLOW, &path);
		if (err) {
			if (err != -ENOENT && err != -EPERM) {
				pr_debug("Path lookup failed: %s (%d)\n", path_info->path, err);
			}
			list_del(&path_info->list);
			kfree(path_info);
			continue;
		}
		// Check lock status
		int tries = 0;
		do {
			if (!is_lock_held(path_info->path))
				break;
			tries++;
			pr_info("%s: waiting for lock on %s (try %d)\n", __func__, path_info->path, tries);
			msleep(100);
		} while (tries < 10);
		struct kstat stat;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(KSU_HAS_NEW_VFS_GETATTR)
		err = vfs_getattr(&path, &stat, STATX_UID, AT_STATX_SYNC_AS_STAT);
#else
		err = vfs_getattr(&path, &stat);
#endif
		path_put(&path);
		if (err) {
			pr_debug("Failed to get attributes: %s (%d)\n", path_info->path, err);
			list_del(&path_info->list);
			kfree(path_info);
			continue;
		}
		uid_t uid = from_kuid(&init_user_ns, stat.uid);
		if (uid == (uid_t)-1) {
			pr_warn("Invalid UID for: %s\n", path_info->path);
			list_del(&path_info->list);
			kfree(path_info);
			continue;
		}
		struct uid_data *uid_entry = kzalloc(sizeof(struct uid_data), GFP_KERNEL);
		if (!uid_entry) {
			pr_err("Memory allocation failed for UID entry: %s\n", path_info->path);
			list_del(&path_info->list);
			kfree(path_info);
			continue;
		}
		uid_entry->uid = uid;
		uid_entry->user_id = path_info->user_id;
		strncpy(uid_entry->package, path_info->package_name, KSU_MAX_PACKAGE_NAME - 1);
		uid_entry->package[KSU_MAX_PACKAGE_NAME - 1] = '\0';
		list_add_tail(&uid_entry->list, uid_list);
		success_count++;
		pr_info("Package: %s, UID: %u, User: %u\n", uid_entry->package, uid, path_info->user_id);
		list_del(&path_info->list);
		kfree(path_info);
		if (success_count % 10 == 0) {
			cond_resched();
			if (fatal_signal_pending(current)) {
				pr_info("Fatal signal received, stopping path processing\n");
				break;
			}
		}
	}
	// Free anything still queued if the loop stopped early (fatal signal),
	// so a partial scan does not leak deferred_path_info entries.
	list_for_each_entry_safe(path_info, n, deferred_paths, list) {
		list_del(&path_info->list);
		kfree(path_info);
	}
	if (skip_count > 0) {
		pr_info("Skipped %d potentially dangerous paths for safety\n", skip_count);
	}
	return success_count;
}
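// scan_primary_user_apps(): open /data/user_de/0, verify its filesystem is
// safe to walk, collect package directory names with the scan_user_packages
// actor, then resolve the deferred paths into uid_list once the directory
// file has been closed.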
static int scan_primary_user_apps(struct list_head *uid_list,
				  size_t *pkg_count, size_t *error_count,
				  struct work_buffers *work_buf)
{
	struct file *dir_file;
	struct list_head deferred_paths;
	int ret;
	*pkg_count = *error_count = 0;
	INIT_LIST_HEAD(&deferred_paths);
	pr_info("Scanning primary user (0) applications in %s\n", PRIMARY_USER_PATH);
	dir_file = ksu_filp_open_compat(PRIMARY_USER_PATH, O_RDONLY, 0);
	if (IS_ERR(dir_file)) {
		pr_err("Cannot open primary user path: %s (%ld)\n", PRIMARY_USER_PATH, PTR_ERR(dir_file));
		return PTR_ERR(dir_file);
	}
	// Check file system security
	if (!is_path_for_kern_path(PRIMARY_USER_PATH, dir_file->f_inode->i_sb)) {
		pr_err("Primary user path is not safe for scanning, aborting\n");
		filp_close(dir_file, NULL);
		return -EOPNOTSUPP;
	}
	struct user_scan_ctx scan_ctx = {
		.deferred_paths = &deferred_paths,
		.user_id = 0,
		.pkg_count = 0,
		.error_count = 0,
		.work_buf = work_buf,
		.processed_count = 0
	};
	struct user_dir_ctx uctx = {
		.ctx.actor = scan_user_packages,
		.scan_ctx = &scan_ctx
	};
	ret = iterate_dir(dir_file, &uctx.ctx);
	filp_close(dir_file, NULL);
	int processed = process_deferred_paths(&deferred_paths, uid_list);
	*pkg_count = processed;
	*error_count = scan_ctx.error_count;
	pr_info("Primary user scan completed: %zu packages found, %zu errors\n",
		*pkg_count, *error_count);
	return ret;
}
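// collect_user_ids(): readdir actor for /data/user_de. Directory names made
// up only of digits are parsed as Android user IDs and stored in the
// caller-provided array, up to max_count entries.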
FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
				     int namelen, loff_t off, u64 ino, unsigned int d_type)
{
	struct user_id_ctx *uctx = container_of(ctx, struct user_id_ctx, ctx);
	uctx->processed_count++;
	if (uctx->processed_count % SCHEDULE_INTERVAL == 0) {
		cond_resched();
		if (fatal_signal_pending(current))
			return FILLDIR_ACTOR_STOP;
	}
	if (d_type != DT_DIR || namelen <= 0)
		return FILLDIR_ACTOR_CONTINUE;
	if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
		return FILLDIR_ACTOR_CONTINUE;
	uid_t uid = 0;
	for (int i = 0; i < namelen; i++) {
		if (name[i] < '0' || name[i] > '9')
			return FILLDIR_ACTOR_CONTINUE;
		uid = uid * 10 + (name[i] - '0');
	}
	if (uctx->count >= uctx->max_count)
		return FILLDIR_ACTOR_STOP;
	uctx->user_ids[uctx->count++] = uid;
	return FILLDIR_ACTOR_CONTINUE;
}
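// get_all_active_users(): enumerate /data/user_de and fill
// work_buf->user_ids_buffer with every user ID found. Falls back to user 0
// only when the base path does not sit on a filesystem considered safe.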
static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count)
{
	struct file *dir_file;
	int ret;
	*found_count = 0;
	dir_file = ksu_filp_open_compat(USER_DATA_BASE_PATH, O_RDONLY, 0);
	if (IS_ERR(dir_file)) {
		pr_err("Cannot open user data base path: %s (%ld)\n", USER_DATA_BASE_PATH, PTR_ERR(dir_file));
		return PTR_ERR(dir_file);
	}
	// Check the file system type of the base path
	if (!is_path_for_kern_path(USER_DATA_BASE_PATH, dir_file->f_inode->i_sb)) {
		pr_warn("User data base path is not safe for scanning, using primary user only\n");
		filp_close(dir_file, NULL);
		work_buf->user_ids_buffer[0] = 0;
		*found_count = 1;
		return 0;
	}
	struct user_id_ctx uctx = {
		.ctx.actor = collect_user_ids,
		.user_ids = work_buf->user_ids_buffer,
		.count = 0,
		.max_count = MAX_SUPPORTED_USERS,
		.processed_count = 0
	};
	ret = iterate_dir(dir_file, &uctx.ctx);
	filp_close(dir_file, NULL);
	*found_count = uctx.count;
	if (uctx.count > 0) {
		pr_info("Found %zu active users: ", uctx.count);
		for (size_t i = 0; i < uctx.count; i++) {
			pr_cont("%u ", work_buf->user_ids_buffer[i]);
		}
		pr_cont("\n");
	}
	return ret;
}
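// scan_user_worker(): workqueue handler that scans one secondary user's
// /data/user_de/<id> directory. The shared uid_list is only touched under
// item->uid_list_mutex; counters are accumulated atomically, and the last
// worker to finish signals work_completion.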
static void scan_user_worker(struct work_struct *work)
{
	struct scan_work_item *item = container_of(work, struct scan_work_item, work);
	char path_buffer[DATA_PATH_LEN];
	struct file *dir_file;
	struct list_head deferred_paths;
	int processed = 0;
	INIT_LIST_HEAD(&deferred_paths);
	snprintf(path_buffer, sizeof(path_buffer), "%s/%u", USER_DATA_BASE_PATH, item->user_id);
	dir_file = ksu_filp_open_compat(path_buffer, O_RDONLY, 0);
	if (IS_ERR(dir_file)) {
		pr_debug("Cannot open user path: %s (%ld)\n", path_buffer, PTR_ERR(dir_file));
		atomic_inc(item->total_error_count);
		goto done;
	}
	// Check user directory security
	if (!is_path_for_kern_path(path_buffer, dir_file->f_inode->i_sb)) {
		pr_warn("User path %s is not safe for scanning, skipping\n", path_buffer);
		filp_close(dir_file, NULL);
		goto done;
	}
	struct user_scan_ctx scan_ctx = {
		.deferred_paths = &deferred_paths,
		.user_id = item->user_id,
		.pkg_count = 0,
		.error_count = 0,
		.work_buf = NULL,
		.processed_count = 0
	};
	struct user_dir_ctx uctx = {
		.ctx.actor = scan_user_packages,
		.scan_ctx = &scan_ctx
	};
	iterate_dir(dir_file, &uctx.ctx);
	filp_close(dir_file, NULL);
	mutex_lock(item->uid_list_mutex);
	processed = process_deferred_paths(&deferred_paths, item->uid_list);
	mutex_unlock(item->uid_list_mutex);
	atomic_add(processed, item->total_pkg_count);
	atomic_add(scan_ctx.error_count, item->total_error_count);
	if (processed > 0 || scan_ctx.error_count > 0) {
		pr_info("User %u: %d packages, %zu errors\n", item->user_id, processed, scan_ctx.error_count);
	}
done:
	if (atomic_dec_and_test(item->remaining_workers)) {
		complete(item->work_completion);
	}
	kfree(item);
}
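// scan_secondary_users_apps(): fan the remaining users out to the "ksu_scan"
// workqueue, one scan_work_item per user, and block until every submitted
// worker has reported back.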
static int scan_secondary_users_apps(struct list_head *uid_list,
				     struct work_buffers *work_buf, size_t user_count,
				     size_t *total_pkg_count, size_t *total_error_count)
{
	// On-stack completions must use the _ONSTACK variant.
	DECLARE_COMPLETION_ONSTACK(work_completion);
	DEFINE_MUTEX(uid_list_mutex);
	atomic_t atomic_pkg_count = ATOMIC_INIT(0);
	atomic_t atomic_error_count = ATOMIC_INIT(0);
	// Start at 1: the submitter holds a bias so that a fast worker cannot
	// drive the count to zero (and signal completion) while later workers
	// are still being queued.
	atomic_t remaining_workers = ATOMIC_INIT(1);
	int submitted_workers = 0;
	if (!scan_workqueue) {
		scan_workqueue = create_workqueue("ksu_scan");
		if (!scan_workqueue) {
			pr_err("Failed to create workqueue\n");
			return -ENOMEM;
		}
	}
	for (size_t i = 0; i < user_count; i++) {
		// Skip the primary user since it has already been scanned.
		if (work_buf->user_ids_buffer[i] == 0)
			continue;
		struct scan_work_item *work_item = kzalloc(sizeof(struct scan_work_item), GFP_KERNEL);
		if (!work_item) {
			pr_err("Failed to allocate work item for user %u\n", work_buf->user_ids_buffer[i]);
			continue;
		}
		INIT_WORK(&work_item->work, scan_user_worker);
		work_item->user_id = work_buf->user_ids_buffer[i];
		work_item->uid_list = uid_list;
		work_item->uid_list_mutex = &uid_list_mutex;
		work_item->total_pkg_count = &atomic_pkg_count;
		work_item->total_error_count = &atomic_error_count;
		work_item->work_completion = &work_completion;
		work_item->remaining_workers = &remaining_workers;
		atomic_inc(&remaining_workers);
		if (queue_work(scan_workqueue, &work_item->work)) {
			submitted_workers++;
		} else {
			atomic_dec(&remaining_workers);
			kfree(work_item);
		}
	}
	if (submitted_workers > 0)
		pr_info("Submitted %d concurrent scan workers\n", submitted_workers);
	// Drop the submitter's bias. If workers are still outstanding, the last
	// one to finish will complete(); otherwise there is nothing to wait for.
	if (!atomic_dec_and_test(&remaining_workers))
		wait_for_completion(&work_completion);
	*total_pkg_count = atomic_read(&atomic_pkg_count);
	*total_error_count = atomic_read(&atomic_error_count);
	return 0;
}
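// scan_user_data_for_uids(): top-level entry point. Always scans user 0;
// when scan_all_users is set it additionally enumerates every user under
// /data/user_de and scans them concurrently. Returns 0 if at least one
// package was collected into uid_list.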
int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users)
{
	if (!uid_list)
		return -EINVAL;
	if (in_interrupt() || in_atomic()) {
		pr_err("Cannot scan user data in atomic context\n");
		return -EINVAL;
	}
	struct work_buffers *work_buf = get_work_buffer();
	if (!work_buf) {
		pr_err("Failed to get work buffer\n");
		return -ENOMEM;
	}
	// Scan primary user (User 0)
	size_t primary_pkg_count, primary_error_count;
	int ret = scan_primary_user_apps(uid_list, &primary_pkg_count, &primary_error_count, work_buf);
	if (ret < 0 && primary_pkg_count == 0) {
		pr_err("Primary user scan failed completely: %d\n", ret);
		return ret;
	}
	// If scanning all users is not required, stop here.
	if (!scan_all_users) {
		pr_info("Scan completed (primary user only): %zu packages, %zu errors\n",
			primary_pkg_count, primary_error_count);
		return primary_pkg_count > 0 ? 0 : -ENOENT;
	}
	// Retrieve all active users
	size_t active_users;
	ret = get_all_active_users(work_buf, &active_users);
	if (ret < 0 || active_users == 0) {
		pr_warn("Failed to get active users or no additional users found, using primary user only: %d\n", ret);
		return primary_pkg_count > 0 ? 0 : -ENOENT;
	}
	size_t secondary_pkg_count, secondary_error_count;
	ret = scan_secondary_users_apps(uid_list, work_buf, active_users,
					&secondary_pkg_count, &secondary_error_count);
	size_t total_packages = primary_pkg_count + secondary_pkg_count;
	size_t total_errors = primary_error_count + secondary_error_count;
	if (total_errors > 0)
		pr_warn("Scan completed with %zu errors\n", total_errors);
	pr_info("Complete scan finished: %zu users, %zu total packages\n",
		active_users, total_packages);
	return total_packages > 0 ? 0 : -ENOENT;
}

@@ -0,0 +1,96 @@
#ifndef _KSU_USER_DATA_SCANNER_H_
#define _KSU_USER_DATA_SCANNER_H_
#include <linux/list.h>
#include <linux/types.h>
#include <linux/fs.h>
#define USER_DATA_BASE_PATH "/data/user_de"
#define PRIMARY_USER_PATH "/data/user_de/0"
#define DATA_PATH_LEN 384 // 384 is enough for /data/user_de/{userid}/<package> and /data/app/<package>/base.apk
#define MAX_SUPPORTED_USERS 32 // Supports up to 32 users
#define SMALL_BUFFER_SIZE 64
#define SCHEDULE_INTERVAL 100
#define MAX_CONCURRENT_WORKERS 8
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed. Instead of
// returning 0 or -E... it returns bool now. false means "no more" (as -E...
// used to) and true - "keep going" (as 0 in old calling conventions).
// Rationale: callers never looked at specific -E... values anyway.
// -> iterate_shared() instances require no changes at all, all filldir_t ones
// in the tree converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
// Global work buffer to avoid stack allocation
struct work_buffers {
	char path_buffer[DATA_PATH_LEN];
	char package_buffer[KSU_MAX_PACKAGE_NAME];
	char small_buffer[SMALL_BUFFER_SIZE];
	uid_t user_ids_buffer[MAX_SUPPORTED_USERS];
};
struct work_buffers *get_work_buffer(void);
struct uid_data {
	struct list_head list;
	u32 uid;
	char package[KSU_MAX_PACKAGE_NAME];
	uid_t user_id;
};
struct deferred_path_info {
	struct list_head list;
	char path[DATA_PATH_LEN];
	char package_name[KSU_MAX_PACKAGE_NAME];
	uid_t user_id;
};
struct user_scan_ctx {
	struct list_head *deferred_paths;
	uid_t user_id;
	size_t pkg_count;
	size_t error_count;
	struct work_buffers *work_buf;
	size_t processed_count;
};
struct user_dir_ctx {
	struct dir_context ctx;
	struct user_scan_ctx *scan_ctx;
};
struct user_id_ctx {
	struct dir_context ctx;
	uid_t *user_ids;
	size_t count;
	size_t max_count;
	size_t processed_count;
};
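// Per-worker bookkeeping: each queued work item carries pointers back to the
// submitter's shared uid_list, its mutex, the atomic counters, and the
// completion used to signal when the last worker has finished.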
struct scan_work_item {
	struct work_struct work;
	uid_t user_id;
	struct list_head *uid_list;
	struct mutex *uid_list_mutex;
	atomic_t *total_pkg_count;
	atomic_t *total_error_count;
	struct completion *work_completion;
	atomic_t *remaining_workers;
};
int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users);
FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
				       int namelen, loff_t off, u64 ino, unsigned int d_type);
FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
				     int namelen, loff_t off, u64 ino, unsigned int d_type);
static int process_deferred_paths(struct list_head *deferred_paths, struct list_head *uid_list);
static int scan_primary_user_apps(struct list_head *uid_list, size_t *pkg_count,
				  size_t *error_count, struct work_buffers *work_buf);
static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count);
static void scan_user_worker(struct work_struct *work);
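// Usage sketch (not from the original sources; assumes the caller owns the
// returned entries and frees them once consumed):
//
//	LIST_HEAD(uid_list);
//	if (!scan_user_data_for_uids(&uid_list, true)) {
//		struct uid_data *entry, *tmp;
//		list_for_each_entry_safe(entry, tmp, &uid_list, list) {
//			pr_info("pkg=%s uid=%u user=%u\n",
//				entry->package, entry->uid, entry->user_id);
//			list_del(&entry->list);
//			kfree(entry);
//		}
//	}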
#endif /* _KSU_USER_DATA_SCANNER_H_ */