10 Commits

Author SHA1 Message Date
ShirkNeko
7b6074cfc3 kernel: Fix a NEON/FPSIMD register state error hit while decrypting filenames when iterate_dir() is called on an encrypted directory (F2FS + file-based encryption).
Error log:
[ T4681] Call trace:
[ T4681]  fpsimd_save_state+0x4/0x58
[ T4681]  cts_cbc_decrypt+0x268/0x384
[ T4681]  fscrypt_fname_disk_to_usr+0x1dc/0x338
[ T4681]  f2fs_fill_dentries+0x1cc/0x330
[ T4681]  f2fs_readdir+0x1a0/0x3ec
[ T4681]  iterate_dir+0x80/0x170
[ T4681]  scan_user_data_for_uids+0x170/0x560
[ T4681]  throne_tracker_thread+0x68/0x290
2025-09-16 22:36:26 +08:00
ShirkNeko
4e8d699654 sporadic deadlock fix
Move to always-kthreaded execution to mitigate sporadic deadlocks.

Co-authored-by: backslashxx <118538522+backslashxx@users.noreply.github.com>
2025-09-16 19:36:47 +08:00
ShirkNeko
60d122c01b kernel: Add support for concurrent scanning of user data apps 2025-09-16 18:16:19 +08:00
ShirkNeko
335ddc4432 kernel: Enhance user data scanning
Add filesystem type checks so dangerous paths are skipped
2025-09-16 17:27:00 +08:00
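The filesystem check boils down to comparing the directory's superblock magic against a small deny list before any path work happens; the full helpers (is_dangerous_fs_magic(), is_path_for_kern_path()) appear in kernel/user_data_scanner.c below. A minimal sketch of the core check, with the surrounding error handling assumed rather than copied from the diff (in the actual code it is reached via is_path_for_kern_path()):

/* Sketch: refuse to scan a directory whose filesystem magic is on the deny list. */
if (is_dangerous_fs_magic(dir_file->f_inode->i_sb->s_magic)) {
	pr_warn("skipping unsafe filesystem for %s\n", path);
	filp_close(dir_file, NULL);
	return -EOPNOTSUPP;
}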
ShirkNeko
765106c56a kernel: Separate and modularize the user data scanner scan function 2025-09-16 17:04:49 +08:00
ShirkNeko
b685f03a6e kernel: Separate kern_path() and iterate_dir() operations to avoid lock contention. 2025-09-16 15:45:29 +08:00
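This split turns each directory walk into two phases: entry names are only recorded while iterate_dir() holds the directory, and kern_path()/vfs_getattr() run afterwards on the saved names. A minimal sketch of the pattern, assuming a 6.1+ kernel (bool filldir return) and simplified types; the real implementation lives in kernel/user_data_scanner.c further down:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/namei.h>
#include <linux/slab.h>

/* Phase 1: remember names; no path lookup inside the readdir callback. */
struct deferred_entry {
	struct list_head list;
	char path[384];
};

struct collect_ctx {
	struct dir_context ctx;
	const char *parent;
	struct list_head *deferred;
};

static bool collect_actor(struct dir_context *ctx, const char *name, int namelen,
			  loff_t off, u64 ino, unsigned int d_type)
{
	struct collect_ctx *c = container_of(ctx, struct collect_ctx, ctx);
	struct deferred_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return true; /* keep iterating */
	snprintf(e->path, sizeof(e->path), "%s/%.*s", c->parent, namelen, name);
	list_add_tail(&e->list, c->deferred);
	return true;
}

/* Phase 2: the directory has been closed, so resolving paths here avoids
 * holding its locks across kern_path(). */
static void resolve_deferred(struct list_head *deferred)
{
	struct deferred_entry *e, *n;

	list_for_each_entry_safe(e, n, deferred, list) {
		struct path p;

		if (!kern_path(e->path, LOOKUP_FOLLOW, &p)) {
			/* vfs_getattr(&p, ...) and record the owning UID */
			path_put(&p);
		}
		list_del(&e->list);
		kfree(e);
	}
}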
ShirkNeko
fae301c161 kernel: Remove duplicate #include <linux/list.h> 2025-09-16 15:09:13 +08:00
ShirkNeko
73cd1f2cf3 kernel: Optimize thread scheduling during user scans
`[   23.379244][ T5074] ufshcd-qcom 1d84000.ufshc: ............. ufs dump complete ..........
[   23.379263][ T5074] dump-reason-buffer-size: 256
[   23.379267][ T5074] dump-reason-pidbuffer:PID: 5074, Process Name: throne_tracker
[   23.379295][ T5074] qcom_q6v5_pas a3380000.remoteproc-soccp: waking SOCCP from panic path
[   23.379455][ T5074] CPU0 next event is 23368000000
[   23.379456][ T5074] CP.rkp_only' to 'true' in property file '/odm/build.prop': SELinux permission check failed
[    1.248057][    T1] init: Do not have permissions to set 'ro.oplus.nfc.support.tee' to 'true' in pro   23.379459][ T5074] CPU5 next event is 23368000000
[   23.3794   1.248059][    T1] init: Do not have permissions to set 'ro.oplus.eid.enable.state' to '1' in property file '/odm/build.prop':l-3d0: snapshot: device is powered off
[   23.589323][ T5074] Kernel Offset: 0x1738a00000 from 0xffffffc080000000
[   23.589325][ T5074] PHYS_OFFSET: 0x80000000
[   23.589326][ T5074] CPU features: 0x000000,00000000,70024f43,95fffea7
[   23.589328][ T5074] Memory Limit: none
[   23.589490][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] host pm_state:M2 dev_state:M2 ee:MISSION MODE
[   23.589505][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] device ee: MISSION MODE dev_state: M2`

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-16 15:00:05 +08:00
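The scheduling change amounts to yielding periodically from inside long readdir callbacks. The pattern, as it appears throughout the new scanner below (processed_count / SCHEDULE_INTERVAL), is simply:

/* Yield every SCHEDULE_INTERVAL entries so a long scan cannot monopolize a CPU,
 * and bail out early if the task has been fatally signalled. */
scan_ctx->processed_count++;
if (scan_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
	cond_resched();
	if (fatal_signal_pending(current))
		return FILLDIR_ACTOR_STOP;
}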
ShirkNeko
eb5d8fa770 kernel: Use a cached buffer as an array-based stack to avoid panics caused by overly deep traversal.
INTCAM: no information
       TPU: no information
       TNR: no information
       MFC: no information
        BO: no information
[   4.715484] [I] [DSS] Last AVB: avb_ret=ERROR_VERIFICATION
[   4.715890] [I] [DSS] Last AVB: avb_veritymode=enforcing
[   4.716289] [I] [DSS] Last AVB: avb_error_parts=boot
[   4.717085] [I] [LNXDBG] build info set by kernel
RAMDUMP_MSG.txt:
  reset message: KP: kernel stack overflow
  UUID: e2faff80-83ea-c240-ac75-d7b8a528c892
  last kernel version: 6.1.134-android14-11-g23e556daebf3-ab13800907
  aosp kernel version: 6.1.145-android14-11-g8d713f9e8e7b-ab13202960
  build: google/shiba/shiba:16/BP3A.250905.014/13873947:user/release-keys
  RST_STAT: 0x1 - CLUSTER0_NONCPU_WDTRESET
  GSA_RESET_STATUS: 0x0 -
  Reboot reason: 0xbaba - Kernel PANIC
  Reboot mode: 0x0 - Normal Boot
[   4.719030] [I] [DSS] -------------------- DSS LOGS END --------------------

Reboot Info:
  RST_STAT: 0x180000 - PIN_RESET | PO_RESET
  GSA_RESET_STATUS: 0x0 -
  Reboot reason: 0xbaba - Kernel PANIC
  Reboot mode: 0x0 - Normal Boot

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>
2025-09-16 00:53:48 +08:00
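The stack overflow above comes from large on-stack path buffers stacking up across nested traversal calls. The change replaces them with one preallocated buffer handed out by get_work_buffer() (see the user_data_scanner diffs below). A minimal sketch of the idea, with illustrative buffer sizes:

/* Sketch: move the big scratch arrays off the kernel stack into a single
 * static buffer; the one global instance assumes callers are serialized. */
struct work_buffers {
	char path_buffer[384];
	char package_buffer[256];
};

struct work_buffers *get_work_buffer(void)
{
	static struct work_buffers global_buffer; /* lives in .bss, not on the stack */

	return &global_buffer;
}

/* before: char dirpath[384]; snprintf(dirpath, sizeof(dirpath), ...);
 * after:  struct work_buffers *wb = get_work_buffer();
 *         snprintf(wb->path_buffer, sizeof(wb->path_buffer), ...);
 */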
ShirkNeko
a197600cb5 kernel: Add optional full-user scanning capability using prctl 2025-09-15 19:14:55 +08:00
10 changed files with 906 additions and 350 deletions

View File

@@ -22,6 +22,8 @@ else
kernelsu-objs += throne_tracker.o
endif
kernelsu-objs += user_data_scanner.o
ccflags-y += -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
ccflags-y += -I$(objtree)/security/selinux -include $(srctree)/include/uapi/asm-generic/errno.h

View File

@@ -20,7 +20,7 @@
#include "manager.h"
#define FILE_MAGIC 0x7f4b5355 // ' KSU', u32
#define FILE_FORMAT_VERSION 3 // u32
#define FILE_FORMAT_VERSION 4 // u32
#define KSU_APP_PROFILE_PRESERVE_UID 9999 // NOBODY_UID
#define KSU_DEFAULT_SELINUX_DOMAIN "u:r:su:s0"
@@ -34,6 +34,8 @@ static struct non_root_profile default_non_root_profile;
static int allow_list_arr[PAGE_SIZE / sizeof(int)] __read_mostly __aligned(PAGE_SIZE);
static int allow_list_pointer __read_mostly = 0;
bool scan_all_users __read_mostly = false;
static void remove_uid_from_arr(uid_t uid)
{
int *temp_arr;
@@ -351,10 +353,27 @@ bool ksu_get_allow_list(int *array, int *length, bool allow)
return true;
}
bool ksu_set_scan_all_users(bool enabled)
{
mutex_lock(&allowlist_mutex);
scan_all_users = enabled;
mutex_unlock(&allowlist_mutex);
pr_info("scan_all_users set to: %d\n", enabled);
return persistent_allow_list();
}
bool ksu_get_scan_all_users(void)
{
return scan_all_users;
}
static void do_save_allow_list(struct work_struct *work)
{
u32 magic = FILE_MAGIC;
u32 version = FILE_FORMAT_VERSION;
u32 scan_setting = scan_all_users ? 1 : 0;
struct perm_data *p = NULL;
struct list_head *pos = NULL;
loff_t off = 0;
@@ -379,6 +398,13 @@ static void do_save_allow_list(struct work_struct *work)
goto exit;
}
// Save scan_all_users settings
if (ksu_kernel_write_compat(fp, &scan_setting, sizeof(scan_setting), &off) !=
sizeof(scan_setting)) {
pr_err("save_allow_list write scan_setting failed.\n");
goto exit;
}
list_for_each (pos, &allow_list) {
p = list_entry(pos, struct perm_data, list);
pr_info("save allow list, name: %s uid: %d, allow: %d\n",
@@ -400,6 +426,7 @@ static void do_load_allow_list(struct work_struct *work)
struct file *fp = NULL;
u32 magic;
u32 version;
u32 scan_setting = 0;
#ifdef CONFIG_KSU_DEBUG
// always allow adb shell by default
@@ -429,6 +456,24 @@ static void do_load_allow_list(struct work_struct *work)
pr_info("allowlist version: %d\n", version);
if (version >= 4) {
if (ksu_kernel_read_compat(fp, &scan_setting, sizeof(scan_setting), &off) !=
sizeof(scan_setting)) {
pr_warn("allowlist read scan_setting failed, using default\n");
scan_setting = 0;
}
mutex_lock(&allowlist_mutex);
scan_all_users = (scan_setting != 0);
mutex_unlock(&allowlist_mutex);
pr_info("loaded scan_all_users: %d\n", scan_all_users);
} else {
mutex_lock(&allowlist_mutex);
scan_all_users = false;
mutex_unlock(&allowlist_mutex);
}
while (true) {
struct app_profile profile;
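With the version bump to 4, the persisted allowlist gains a single u32 for scan_all_users right after the header. Based only on the read/write order in this hunk, the on-disk layout is:

offset 0   u32 magic          FILE_MAGIC (0x7f4b5355)
offset 4   u32 version        now 4
offset 8   u32 scan_setting   0 or 1; read back only when version >= 4
offset 12  app_profile[]      one record per allow-list entry, until EOF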

View File

@@ -24,4 +24,9 @@ bool ksu_set_app_profile(struct app_profile *, bool persist);
bool ksu_uid_should_umount(uid_t uid);
struct root_profile *ksu_get_root_profile(uid_t uid);
bool ksu_set_scan_all_users(bool enabled);
bool ksu_get_scan_all_users(void);
extern bool scan_all_users __read_mostly;
#endif

View File

@@ -259,7 +259,7 @@ static bool has_v1_signature_file(struct file *fp)
* possibly optional
*
*/
static bool is_lock_held(const char *path)
bool is_lock_held(const char *path)
{
struct path kpath;

View File

@@ -588,6 +588,34 @@ int ksu_handle_prctl(int option, unsigned long arg2, unsigned long arg3,
return 0;
}
if (arg2 == CMD_SCAN_ALL_USERS) {
if (!from_root && !from_manager) {
return 0;
}
// Get or Set scan_all_users
if (arg3 == 0) {
bool current_state = ksu_get_scan_all_users();
if (copy_to_user((void __user *)arg4, &current_state, sizeof(current_state))) {
pr_err("scan_all_users: copy current state failed\n");
return 0;
}
} else {
// Set new state (arg3 = 1: Enable, arg3 = 2: Disable)
bool new_state = (arg3 == 1);
if (ksu_set_scan_all_users(new_state)) {
pr_info("scan_all_users set to: %d\n", new_state);
} else {
pr_err("Failed to set scan_all_users to: %d\n", new_state);
return 0;
}
}
if (copy_to_user(result, &reply_ok, sizeof(reply_ok))) {
pr_err("scan_all_users: prctl reply error\n");
}
return 0;
}
#ifdef CONFIG_KPM
// ADD: KPM module control
if(sukisu_is_kpm_control_code(arg2)) {

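For reference, a userspace sketch of driving CMD_SCAN_ALL_USERS. The command number (17) comes from ksu.h below; the prctl option value and the u32 reply word follow the usual KernelSU convention and are assumptions here, not taken from this diff. arg3 selects the operation: 0 reads the current state into arg4, 1 enables, 2 disables.

/* Hypothetical userspace caller; KSU_OPTION is assumed, not defined in this diff. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/prctl.h>

#define KSU_OPTION         0xDEADBEEF /* assumed KernelSU prctl option value */
#define CMD_SCAN_ALL_USERS 17

int main(void)
{
	uint32_t reply = 0;
	bool state = false;

	/* arg3 == 0: copy the current scan_all_users flag into arg4 */
	prctl(KSU_OPTION, CMD_SCAN_ALL_USERS, 0, &state, &reply);
	printf("scan_all_users = %d\n", state);

	/* arg3 == 1: enable full-user scanning (2 would disable it) */
	prctl(KSU_OPTION, CMD_SCAN_ALL_USERS, 1, 0, &reply);
	return 0;
}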
View File

@@ -6,7 +6,6 @@
#include <linux/cred.h>
#include "ss/policydb.h"
#include "linux/key.h"
#include <linux/list.h>
/**
* list_count_nodes - count the number of nodes in a list

View File

@@ -23,6 +23,7 @@
#define CMD_UID_SHOULD_UMOUNT 13
#define CMD_IS_SU_ENABLED 14
#define CMD_ENABLE_SU 15
#define CMD_SCAN_ALL_USERS 17
#define CMD_GET_FULL_VERSION 0xC0FFEE1A

View File

@@ -5,8 +5,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include "allowlist.h"
#include "klog.h" // IWYU pragma: keep
@@ -16,6 +14,7 @@
#include "throne_tracker.h"
#include "kernel_compat.h"
#include "dynamic_manager.h"
#include "user_data_scanner.h"
#include <linux/kthread.h>
#include <linux/sched.h>
@@ -24,70 +23,6 @@ uid_t ksu_manager_uid = KSU_INVALID_UID;
static struct task_struct *throne_thread;
#define USER_DATA_BASE_PATH "/data/user_de"
#define MAX_SUPPORTED_USERS 32 // Supports up to 32 users
#define DATA_PATH_LEN 384 // 384 is enough for /data/app/<package>/base.apk and /data/user_de/{userid}/<package>
struct uid_data {
struct list_head list;
u32 uid;
char package[KSU_MAX_PACKAGE_NAME];
};
struct user_scan_ctx {
struct list_head *uid_list;
uid_t user_id;
size_t pkg_count;
size_t error_count;
};
struct user_dir_ctx {
struct dir_context ctx;
struct user_scan_ctx *scan_ctx;
};
struct user_id_ctx {
struct dir_context ctx;
uid_t *user_ids;
size_t count;
size_t max_count;
};
struct data_path {
char dirpath[DATA_PATH_LEN];
int depth;
struct list_head list;
};
struct apk_path_hash {
unsigned int hash;
bool exists;
struct list_head list;
};
static struct list_head apk_path_hash_list;
struct my_dir_context {
struct dir_context ctx;
struct list_head *data_path_list;
char *parent_dir;
void *private_data;
int depth;
int *stop;
bool found_dynamic_manager;
};
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
static int get_pkg_from_apk_path(char *pkg, const char *path)
{
int len = strlen(path);
@@ -128,36 +63,32 @@ static int get_pkg_from_apk_path(char *pkg, const char *path)
}
static void crown_manager(const char *apk, struct list_head *uid_data,
int signature_index)
int signature_index, struct work_buffers *work_buf)
{
char pkg[KSU_MAX_PACKAGE_NAME];
if (get_pkg_from_apk_path(pkg, apk) < 0) {
if (get_pkg_from_apk_path(work_buf->package_buffer, apk) < 0) {
pr_err("Failed to get package name from apk path: %s\n", apk);
return;
}
pr_info("manager pkg: %s, signature_index: %d\n", pkg, signature_index);
pr_info("manager pkg: %s, signature_index: %d\n", work_buf->package_buffer, signature_index);
#ifdef KSU_MANAGER_PACKAGE
// pkg is `/<real package>`
if (strncmp(pkg, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) {
if (strncmp(work_buf->package_buffer, KSU_MANAGER_PACKAGE, sizeof(KSU_MANAGER_PACKAGE))) {
pr_info("manager package is inconsistent with kernel build: %s\n",
KSU_MANAGER_PACKAGE);
return;
}
#endif
struct list_head *list = (struct list_head *)uid_data;
struct uid_data *np;
list_for_each_entry(np, uid_data, list) {
if (strncmp(np->package, work_buf->package_buffer, KSU_MAX_PACKAGE_NAME) == 0) {
pr_info("Crowning manager: %s(uid=%d, signature_index=%d, user=%u)\n",
work_buf->package_buffer, np->uid, signature_index, np->user_id);
list_for_each_entry(np, list, list) {
if (strncmp(np->package, pkg, KSU_MAX_PACKAGE_NAME) == 0) {
pr_info("Crowning manager: %s(uid=%d, signature_index=%d)\n",
pkg, np->uid, signature_index);
// Dynamic Sign index (100) or multi-manager signatures (>= 2)
if (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2) {
ksu_add_manager(np->uid, signature_index);
if (!ksu_is_manager_uid_valid()) {
ksu_set_manager_uid(np->uid);
}
@@ -169,236 +100,31 @@ static void crown_manager(const char *apk, struct list_head *uid_data,
}
}
FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type)
{
struct user_id_ctx *uctx = container_of(ctx, struct user_id_ctx, ctx);
struct data_path {
char dirpath[DATA_PATH_LEN];
int depth;
struct list_head list;
};
// Skip non-directories and dot entries
if (d_type != DT_DIR || namelen <= 0)
return FILLDIR_ACTOR_CONTINUE;
if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
return FILLDIR_ACTOR_CONTINUE;
struct apk_path_hash {
unsigned int hash;
bool exists;
struct list_head list;
};
// Parse numeric user ID
uid_t uid = 0;
for (int i = 0; i < namelen; i++) {
if (name[i] < '0' || name[i] > '9')
return FILLDIR_ACTOR_CONTINUE; // Skip non-numeric entries
uid = uid * 10 + (name[i] - '0');
}
static struct list_head apk_path_hash_list;
// Store user ID if space available
if (uctx->count >= uctx->max_count)
return FILLDIR_ACTOR_STOP;
uctx->user_ids[uctx->count++] = uid;
return FILLDIR_ACTOR_CONTINUE;
}
// Get all active Android user IDs
static int get_active_user_ids(uid_t *user_ids, size_t max_users, size_t *found_count)
{
struct file *dir_file;
int ret;
*found_count = 0;
dir_file = ksu_filp_open_compat(USER_DATA_BASE_PATH, O_RDONLY, 0);
if (IS_ERR(dir_file)) {
pr_err("Cannot open %s: %ld\n", USER_DATA_BASE_PATH, PTR_ERR(dir_file));
return PTR_ERR(dir_file);
}
struct user_id_ctx uctx = {
.ctx.actor = collect_user_ids,
.user_ids = user_ids,
.count = 0,
.max_count = max_users
};
ret = iterate_dir(dir_file, &uctx.ctx);
filp_close(dir_file, NULL);
*found_count = uctx.count;
if (uctx.count > 0)
pr_info("Found %zu active users\n", uctx.count);
return ret;
}
FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type)
{
struct user_dir_ctx *uctx = container_of(ctx, struct user_dir_ctx, ctx);
struct user_scan_ctx *scan_ctx = uctx->scan_ctx;
// Validate context and skip dot entries
if (!scan_ctx || !scan_ctx->uid_list)
return FILLDIR_ACTOR_STOP;
if (d_type != DT_DIR || namelen <= 0)
return FILLDIR_ACTOR_CONTINUE;
if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
return FILLDIR_ACTOR_CONTINUE;
// Check package name length
if (namelen >= KSU_MAX_PACKAGE_NAME) {
pr_warn("Package name too long: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
// Build package path
char pkg_path[DATA_PATH_LEN];
int path_len = snprintf(pkg_path, sizeof(pkg_path), "%s/%u/%.*s",
USER_DATA_BASE_PATH, scan_ctx->user_id, namelen, name);
if (path_len >= sizeof(pkg_path)) {
pr_err("Path too long for: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
// Get package path attributes
struct path path;
int err = kern_path(pkg_path, LOOKUP_FOLLOW, &path);
if (err) {
pr_debug("Path lookup failed: %s (%d)\n", pkg_path, err);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
/*
4.11, also backported on lineage common kernel 4.9 !!
int vfs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
4.10
int vfs_getattr(struct path *path, struct kstat *stat)
basically no mask and flags for =< 4.10
*/
struct kstat stat;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) || defined(KSU_HAS_NEW_VFS_GETATTR)
err = vfs_getattr(&path, &stat, STATX_UID, AT_STATX_SYNC_AS_STAT);
#else
err = vfs_getattr(&path, &stat);
#endif
path_put(&path);
if (err) {
pr_debug("Failed to get attributes: %s (%d)\n", pkg_path, err);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
// Extract UID and validate
uid_t uid = from_kuid(&init_user_ns, stat.uid);
if (uid == (uid_t)-1) {
pr_warn("Invalid UID for: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
// Allocate and populate UID data entry
struct uid_data *uid_entry = kzalloc(sizeof(struct uid_data), GFP_KERNEL);
if (!uid_entry) {
pr_err("Memory allocation failed for: %.*s\n", namelen, name);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
uid_entry->uid = uid;
size_t copy_len = min_t(size_t, namelen, KSU_MAX_PACKAGE_NAME - 1);
strncpy(uid_entry->package, name, copy_len);
uid_entry->package[copy_len] = '\0';
list_add_tail(&uid_entry->list, scan_ctx->uid_list);
scan_ctx->pkg_count++;
pr_info("User Package: %s, UID: %u (user %u)\n", uid_entry->package, uid, scan_ctx->user_id);
return FILLDIR_ACTOR_CONTINUE;
}
static int scan_user_directory(uid_t user_id, struct list_head *uid_list,
size_t *pkg_count, size_t *error_count)
{
char user_path[DATA_PATH_LEN];
struct file *dir_file;
int ret;
*pkg_count = *error_count = 0;
snprintf(user_path, sizeof(user_path), "%s/%u", USER_DATA_BASE_PATH, user_id);
dir_file = ksu_filp_open_compat(user_path, O_RDONLY, 0);
if (IS_ERR(dir_file)) {
pr_debug("Cannot open user path: %s (%ld)\n", user_path, PTR_ERR(dir_file));
return PTR_ERR(dir_file);
}
struct user_scan_ctx scan_ctx = {
.uid_list = uid_list,
.user_id = user_id,
.pkg_count = 0,
.error_count = 0
};
struct user_dir_ctx uctx = {
.ctx.actor = scan_user_packages,
.scan_ctx = &scan_ctx
};
ret = iterate_dir(dir_file, &uctx.ctx);
filp_close(dir_file, NULL);
*pkg_count = scan_ctx.pkg_count;
*error_count = scan_ctx.error_count;
if (scan_ctx.pkg_count > 0 || scan_ctx.error_count > 0)
pr_info("User %u: %zu packages, %zu errors\n",
user_id, scan_ctx.pkg_count, scan_ctx.error_count);
return ret;
}
int scan_user_data_for_uids(struct list_head *uid_list)
{
uid_t user_ids[MAX_SUPPORTED_USERS];
size_t active_users, total_packages = 0, total_errors = 0;
int ret;
if (!uid_list)
return -EINVAL;
// Get all active user IDs
ret = get_active_user_ids(user_ids, ARRAY_SIZE(user_ids), &active_users);
if (ret < 0 || active_users == 0) {
pr_err("No active users found: %d\n", ret);
return ret < 0 ? ret : -ENOENT;
}
// Scan each user's directory
for (size_t i = 0; i < active_users; i++) {
size_t pkg_count, error_count;
ret = scan_user_directory(user_ids[i], uid_list, &pkg_count, &error_count);
if (ret < 0) {
pr_warn("Scan failed for user %u: %d\n", user_ids[i], ret);
total_errors++;
continue;
}
total_packages += pkg_count;
total_errors += error_count;
}
if (total_errors > 0)
pr_warn("Scan completed with %zu errors\n", total_errors);
pr_info("Scanned %zu users, found %zu packages\n", active_users, total_packages);
return total_packages > 0 ? 0 : -ENOENT;
}
struct my_dir_context {
struct dir_context ctx;
struct list_head *data_path_list;
char *parent_dir;
void *private_data;
int depth;
int *stop;
bool found_dynamic_manager;
struct work_buffers *work_buf; // Passing the work buffer
size_t processed_count;
};
FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino,
@@ -406,12 +132,18 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
{
struct my_dir_context *my_ctx =
container_of(ctx, struct my_dir_context, ctx);
char dirpath[DATA_PATH_LEN];
struct work_buffers *work_buf = my_ctx->work_buf;
if (!my_ctx) {
pr_err("Invalid context\n");
return FILLDIR_ACTOR_STOP;
}
my_ctx->processed_count++;
if (my_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
cond_resched();
}
if (my_ctx->stop && *my_ctx->stop) {
pr_info("Stop searching\n");
return FILLDIR_ACTOR_STOP;
@@ -426,7 +158,7 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
return FILLDIR_ACTOR_CONTINUE; // Skip staging package
}
if (snprintf(dirpath, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir,
if (snprintf(work_buf->path_buffer, DATA_PATH_LEN, "%s/%.*s", my_ctx->parent_dir,
namelen, name) >= DATA_PATH_LEN) {
pr_err("Path too long: %s/%.*s\n", my_ctx->parent_dir, namelen,
name);
@@ -438,20 +170,20 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
struct data_path *data = kmalloc(sizeof(struct data_path), GFP_ATOMIC);
if (!data) {
pr_err("Failed to allocate memory for %s\n", dirpath);
pr_err("Failed to allocate memory for %s\n", work_buf->path_buffer);
return FILLDIR_ACTOR_CONTINUE;
}
strscpy(data->dirpath, dirpath, DATA_PATH_LEN);
strscpy(data->dirpath, work_buf->path_buffer, DATA_PATH_LEN);
data->depth = my_ctx->depth - 1;
list_add_tail(&data->list, my_ctx->data_path_list);
} else {
if ((namelen == 8) && (strncmp(name, "base.apk", namelen) == 0)) {
struct apk_path_hash *pos, *n;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
unsigned int hash = full_name_hash(dirpath, strlen(dirpath));
unsigned int hash = full_name_hash(work_buf->path_buffer, strlen(work_buf->path_buffer));
#else
unsigned int hash = full_name_hash(NULL, dirpath, strlen(dirpath));
unsigned int hash = full_name_hash(NULL, work_buf->path_buffer, strlen(work_buf->path_buffer));
#endif
list_for_each_entry(pos, &apk_path_hash_list, list) {
if (hash == pos->hash) {
@@ -462,15 +194,16 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
int signature_index = -1;
bool is_multi_manager = is_dynamic_manager_apk(
dirpath, &signature_index);
work_buf->path_buffer, &signature_index);
pr_info("Found new base.apk at path: %s, is_multi_manager: %d, signature_index: %d\n",
dirpath, is_multi_manager, signature_index);
work_buf->path_buffer, is_multi_manager, signature_index);
// Check for dynamic sign or multi-manager signatures
if (is_multi_manager && (signature_index == DYNAMIC_SIGN_INDEX || signature_index >= 2)) {
my_ctx->found_dynamic_manager = true;
crown_manager(dirpath, my_ctx->private_data, signature_index);
crown_manager(work_buf->path_buffer, my_ctx->private_data,
signature_index, work_buf);
struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
if (apk_data) {
@@ -478,26 +211,27 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
} else if (is_manager_apk(dirpath)) {
crown_manager(dirpath, my_ctx->private_data, 0);
} else if (is_manager_apk(work_buf->path_buffer)) {
crown_manager(work_buf->path_buffer,
my_ctx->private_data, 0, work_buf);
if (!my_ctx->found_dynamic_manager && !ksu_is_dynamic_manager_enabled()) {
*my_ctx->stop = 1;
*my_ctx->stop = 1;
}
// Manager found, clear APK cache list
if (!ksu_is_dynamic_manager_enabled()) {
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list);
kfree(pos);
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list);
kfree(pos);
}
}
} else {
struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
if (apk_data) {
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
apk_data->hash = hash;
apk_data->exists = true;
list_add_tail(&apk_data->list, &apk_path_hash_list);
}
}
}
@@ -510,11 +244,18 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
{
int i, stop = 0;
struct list_head data_path_list;
struct work_buffers *work_buf = get_work_buffer();
if (!work_buf) {
pr_err("Failed to get work buffer for search_manager\n");
return;
}
INIT_LIST_HEAD(&data_path_list);
INIT_LIST_HEAD(&apk_path_hash_list);
unsigned long data_app_magic = 0;
bool found_dynamic_manager = false;
// Initialize APK cache list
struct apk_path_hash *pos, *n;
list_for_each_entry(pos, &apk_path_hash_list, list) {
@@ -537,7 +278,9 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
.private_data = uid_data,
.depth = pos->depth,
.stop = &stop,
.found_dynamic_manager = false };
.found_dynamic_manager = false,
.work_buf = work_buf,
.processed_count = 0 };
struct file *file;
if (!stop) {
@@ -546,7 +289,7 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
pr_err("Failed to open directory: %s, err: %ld\n", pos->dirpath, PTR_ERR(file));
goto skip_iterate;
}
// grab magic on first folder, which is /data/app
if (!data_app_magic) {
if (file->f_inode->i_sb->s_magic) {
@@ -557,7 +300,7 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
goto skip_iterate;
}
}
if (file->f_inode->i_sb->s_magic != data_app_magic) {
pr_info("%s: skip: %s magic: 0x%lx expected: 0x%lx\n", __func__, pos->dirpath,
file->f_inode->i_sb->s_magic, data_app_magic);
@@ -571,19 +314,24 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
if (ctx.found_dynamic_manager) {
found_dynamic_manager = true;
}
cond_resched();
}
skip_iterate:
list_del(&pos->list);
if (pos != &data)
kfree(pos);
}
cond_resched();
}
// clear apk_path_hash_list unconditionally
pr_info("search manager: cleanup!\n");
// Remove stale cached APK entries
list_for_each_entry_safe(pos, n, &apk_path_hash_list, list) {
list_del(&pos->list);
kfree(pos);
if (!pos->exists) {
list_del(&pos->list);
kfree(pos);
}
}
}
@@ -593,7 +341,7 @@ static bool is_uid_exist(uid_t uid, char *package, void *data)
struct uid_data *np;
bool exist = false;
list_for_each_entry (np, list, list) {
list_for_each_entry(np, list, list) {
if (np->uid == uid % 100000 &&
strncmp(np->package, package, KSU_MAX_PACKAGE_NAME) == 0) {
exist = true;
@@ -608,12 +356,12 @@ static void track_throne_function(void)
struct list_head uid_list;
INIT_LIST_HEAD(&uid_list);
// scan user data for uids
int ret = scan_user_data_for_uids(&uid_list);
int ret = scan_user_data_for_uids(&uid_list, scan_all_users);
if (ret < 0) {
pr_err("UserDE UID scan user data failed: %d.\n", ret);
goto out;
}
pr_err("Improved UserDE UID scan failed: %d. scan_all_users=%d\n", ret, scan_all_users);
goto out;
}
// now update uid list
struct uid_data *np;
@@ -623,7 +371,7 @@ static void track_throne_function(void)
bool manager_exist = false;
bool dynamic_manager_exist = false;
list_for_each_entry (np, &uid_list, list) {
list_for_each_entry(np, &uid_list, list) {
// if manager is installed in work profile, the uid in packages.list is still equals main profile
// don't delete it in this case!
int manager_uid = ksu_get_manager_uid() % 100000;
@@ -638,7 +386,7 @@ static void track_throne_function(void)
dynamic_manager_exist = ksu_has_dynamic_managers();
if (!dynamic_manager_exist) {
list_for_each_entry (np, &uid_list, list) {
list_for_each_entry(np, &uid_list, list) {
// Check if this uid is a dynamic manager (not the traditional manager)
if (ksu_is_any_manager(np->uid) && np->uid != ksu_get_manager_uid()) {
dynamic_manager_exist = true;
@@ -675,7 +423,6 @@ out:
}
}
static int throne_tracker_thread(void *data)
{
pr_info("%s: pid: %d started\n", __func__, current->pid);
@@ -692,13 +439,6 @@ static int throne_tracker_thread(void *data)
void track_throne(void)
{
static bool throne_tracker_first_run __read_mostly = true;
if (unlikely(throne_tracker_first_run)) {
track_throne_function();
throne_tracker_first_run = false;
return;
}
smp_mb();
if (throne_thread != NULL) // single instance lock
return;
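The hunk above only shows the single-instance guard on throne_thread; the spawn itself is outside this diff. A minimal sketch of how an always-kthreaded tracker is typically started, reusing the identifiers above (the body is an assumption, not the verbatim source):

/* Sketch: start the tracker kthread once; the NULL check above prevents a second instance. */
throne_thread = kthread_run(throne_tracker_thread, NULL, "throne_tracker");
if (IS_ERR(throne_thread)) {
	pr_err("failed to start throne_tracker: %ld\n", PTR_ERR(throne_thread));
	throne_thread = NULL; /* allow a later retry */
}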

kernel/user_data_scanner.c Normal file (640 lines)
View File

@@ -0,0 +1,640 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/stat.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include "klog.h"
#include "ksu.h"
#include "kernel_compat.h"
#include "user_data_scanner.h"
#define KERN_PATH_TIMEOUT_MS 100
#define MAX_FUSE_CHECK_RETRIES 3
// Magic Number: File System Superblock Identifier
#define FUSE_SUPER_MAGIC 0x65735546 // FUSE (Userspace filesystem)
#define OVERLAYFS_SUPER_MAGIC 0x794c7630 // OverlayFS
#define TMPFS_MAGIC 0x01021994 // tmpfs
#define F2FS_SUPER_MAGIC 0xF2F52010 // F2FS (Flash-Friendly File System)
#define EXT4_SUPER_MAGIC 0xEF53 // ext4
extern bool is_lock_held(const char *path);
static struct workqueue_struct *scan_workqueue;
struct work_buffers *get_work_buffer(void)
{
static struct work_buffers global_buffer;
return &global_buffer;
}
// Check the file system type
static bool is_dangerous_fs_magic(unsigned long magic)
{
switch (magic) {
case FUSE_SUPER_MAGIC:
case OVERLAYFS_SUPER_MAGIC:
case TMPFS_MAGIC:
case F2FS_SUPER_MAGIC:
case EXT4_SUPER_MAGIC:
return true;
default:
return false;
}
}
// Check whether the file system is an encrypted user data file system
static bool is_encrypted_userdata_fs(struct super_block *sb, const char *path)
{
if (!sb || !path)
return true;
if (strstr(path, "/data/user_de") || strstr(path, "/data/user")) {
return true;
}
if (is_dangerous_fs_magic(sb->s_magic)) {
return true;
}
return false;
}
static bool is_path_for_kern_path(const char *path, struct super_block *expected_sb)
{
if (fatal_signal_pending(current)) {
pr_warn("Fatal signal pending, skip path: %s\n", path);
return false;
}
if (need_resched()) {
cond_resched();
if (fatal_signal_pending(current))
return false;
}
if (in_interrupt() || in_atomic()) {
pr_warn("Cannot scan path in atomic context: %s\n", path);
return false;
}
if (!path || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
return false;
}
if (strstr(path, ".tmp") || strstr(path, ".removing") ||
strstr(path, ".unmounting") || strstr(path, ".pending")) {
pr_debug("Path appears to be in transition state: %s\n", path);
return false;
}
if (expected_sb) {
if (is_dangerous_fs_magic(expected_sb->s_magic)) {
pr_info("Skipping dangerous filesystem (magic=0x%lx): %s\n",
expected_sb->s_magic, path);
return false;
}
if (is_encrypted_userdata_fs(expected_sb, path)) {
pr_warn("Skipping potentially encrypted userdata filesystem: %s\n", path);
return false;
}
}
return true;
}
static int kern_path_with_timeout(const char *path, unsigned int flags,
struct path *result)
{
unsigned long start_time = jiffies;
unsigned long timeout = start_time + msecs_to_jiffies(KERN_PATH_TIMEOUT_MS);
int retries = 0;
int err;
if (!is_path_for_kern_path(path, NULL)) {
return -EPERM;
}
do {
if (time_after(jiffies, timeout)) {
pr_warn("kern_path timeout for: %s\n", path);
return -ETIMEDOUT;
}
if (fatal_signal_pending(current)) {
pr_warn("Fatal signal during kern_path: %s\n", path);
return -EINTR;
}
if (in_atomic() || irqs_disabled()) {
pr_warn("Cannot call kern_path in atomic context: %s\n", path);
return -EINVAL;
}
err = kern_path(path, flags, result);
if (err == 0) {
if (!is_path_for_kern_path(path, result->mnt->mnt_sb)) {
path_put(result);
return -EPERM;
}
return 0;
}
if (err == -ENOENT || err == -ENOTDIR || err == -EACCES || err == -EPERM) {
return err;
}
if (err == -EBUSY || err == -EAGAIN) {
retries++;
if (retries >= MAX_FUSE_CHECK_RETRIES) {
pr_warn("Max retries reached for: %s (err=%d)\n", path, err);
return err;
}
usleep_range(1000, 2000);
continue;
}
return err;
} while (retries < MAX_FUSE_CHECK_RETRIES);
return err;
}
FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type)
{
struct user_dir_ctx *uctx = container_of(ctx, struct user_dir_ctx, ctx);
struct user_scan_ctx *scan_ctx = uctx->scan_ctx;
if (!scan_ctx || !scan_ctx->deferred_paths)
return FILLDIR_ACTOR_STOP;
scan_ctx->processed_count++;
if (scan_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
cond_resched();
if (fatal_signal_pending(current)) {
pr_info("Fatal signal received, stopping scan\n");
return FILLDIR_ACTOR_STOP;
}
}
if (d_type != DT_DIR || namelen <= 0)
return FILLDIR_ACTOR_CONTINUE;
if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
return FILLDIR_ACTOR_CONTINUE;
if (namelen >= KSU_MAX_PACKAGE_NAME) {
pr_warn("Package name too long: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
struct deferred_path_info *path_info = kzalloc(sizeof(struct deferred_path_info), GFP_KERNEL);
if (!path_info) {
pr_err("Memory allocation failed for path info: %.*s\n", namelen, name);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
int path_len = snprintf(path_info->path, sizeof(path_info->path),
"%s/%u/%.*s", USER_DATA_BASE_PATH, scan_ctx->user_id, namelen, name);
if (path_len >= sizeof(path_info->path)) {
pr_err("Path too long for: %.*s (user %u)\n", namelen, name, scan_ctx->user_id);
kfree(path_info);
scan_ctx->error_count++;
return FILLDIR_ACTOR_CONTINUE;
}
path_info->user_id = scan_ctx->user_id;
size_t copy_len = min_t(size_t, namelen, KSU_MAX_PACKAGE_NAME - 1);
strncpy(path_info->package_name, name, copy_len);
path_info->package_name[copy_len] = '\0';
list_add_tail(&path_info->list, scan_ctx->deferred_paths);
scan_ctx->pkg_count++;
return FILLDIR_ACTOR_CONTINUE;
}
static int process_deferred_paths(struct list_head *deferred_paths, struct list_head *uid_list)
{
struct deferred_path_info *path_info, *n;
int success_count = 0;
int skip_count = 0;
list_for_each_entry_safe(path_info, n, deferred_paths, list) {
if (!is_path_for_kern_path(path_info->path, NULL)) {
pr_debug("Skipping unsafe path: %s\n", path_info->path);
skip_count++;
list_del(&path_info->list);
kfree(path_info);
continue;
}
// Retrieve path information
struct path path;
int err = kern_path_with_timeout(path_info->path, LOOKUP_FOLLOW, &path);
if (err) {
if (err != -ENOENT && err != -EPERM) {
pr_debug("Path lookup failed: %s (%d)\n", path_info->path, err);
}
list_del(&path_info->list);
kfree(path_info);
continue;
}
// Check lock status
int tries = 0;
do {
if (!is_lock_held(path_info->path))
break;
tries++;
pr_info("%s: waiting for lock on %s (try %d)\n", __func__, path_info->path, tries);
msleep(100);
} while (tries < 10);
struct kstat stat;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) || defined(KSU_HAS_NEW_VFS_GETATTR)
err = vfs_getattr(&path, &stat, STATX_UID, AT_STATX_SYNC_AS_STAT);
#else
err = vfs_getattr(&path, &stat);
#endif
path_put(&path);
if (err) {
pr_debug("Failed to get attributes: %s (%d)\n", path_info->path, err);
list_del(&path_info->list);
kfree(path_info);
continue;
}
uid_t uid = from_kuid(&init_user_ns, stat.uid);
if (uid == (uid_t)-1) {
pr_warn("Invalid UID for: %s\n", path_info->path);
list_del(&path_info->list);
kfree(path_info);
continue;
}
struct uid_data *uid_entry = kzalloc(sizeof(struct uid_data), GFP_KERNEL);
if (!uid_entry) {
pr_err("Memory allocation failed for UID entry: %s\n", path_info->path);
list_del(&path_info->list);
kfree(path_info);
continue;
}
uid_entry->uid = uid;
uid_entry->user_id = path_info->user_id;
strncpy(uid_entry->package, path_info->package_name, KSU_MAX_PACKAGE_NAME - 1);
uid_entry->package[KSU_MAX_PACKAGE_NAME - 1] = '\0';
list_add_tail(&uid_entry->list, uid_list);
success_count++;
pr_info("Package: %s, UID: %u, User: %u\n", uid_entry->package, uid, path_info->user_id);
list_del(&path_info->list);
kfree(path_info);
if (success_count % 10 == 0) {
cond_resched();
if (fatal_signal_pending(current)) {
pr_info("Fatal signal received, stopping path processing\n");
break;
}
}
}
if (skip_count > 0) {
pr_info("Skipped %d potentially dangerous paths for safety\n", skip_count);
}
return success_count;
}
static int scan_primary_user_apps(struct list_head *uid_list,
size_t *pkg_count, size_t *error_count,
struct work_buffers *work_buf)
{
struct file *dir_file;
struct list_head deferred_paths;
int ret;
*pkg_count = *error_count = 0;
INIT_LIST_HEAD(&deferred_paths);
pr_info("Scanning primary user (0) applications in %s\n", PRIMARY_USER_PATH);
dir_file = ksu_filp_open_compat(PRIMARY_USER_PATH, O_RDONLY, 0);
if (IS_ERR(dir_file)) {
pr_err("Cannot open primary user path: %s (%ld)\n", PRIMARY_USER_PATH, PTR_ERR(dir_file));
return PTR_ERR(dir_file);
}
// Check file system security
if (!is_path_for_kern_path(PRIMARY_USER_PATH, dir_file->f_inode->i_sb)) {
pr_err("Primary user path is not safe for scanning, aborting\n");
filp_close(dir_file, NULL);
return -EOPNOTSUPP;
}
struct user_scan_ctx scan_ctx = {
.deferred_paths = &deferred_paths,
.user_id = 0,
.pkg_count = 0,
.error_count = 0,
.work_buf = work_buf,
.processed_count = 0
};
struct user_dir_ctx uctx = {
.ctx.actor = scan_user_packages,
.scan_ctx = &scan_ctx
};
ret = iterate_dir(dir_file, &uctx.ctx);
filp_close(dir_file, NULL);
int processed = process_deferred_paths(&deferred_paths, uid_list);
*pkg_count = processed;
*error_count = scan_ctx.error_count;
pr_info("Primary user scan completed: %zu packages found, %zu errors\n",
*pkg_count, *error_count);
return ret;
}
FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type)
{
struct user_id_ctx *uctx = container_of(ctx, struct user_id_ctx, ctx);
uctx->processed_count++;
if (uctx->processed_count % SCHEDULE_INTERVAL == 0) {
cond_resched();
if (fatal_signal_pending(current))
return FILLDIR_ACTOR_STOP;
}
if (d_type != DT_DIR || namelen <= 0)
return FILLDIR_ACTOR_CONTINUE;
if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
return FILLDIR_ACTOR_CONTINUE;
uid_t uid = 0;
for (int i = 0; i < namelen; i++) {
if (name[i] < '0' || name[i] > '9')
return FILLDIR_ACTOR_CONTINUE;
uid = uid * 10 + (name[i] - '0');
}
if (uctx->count >= uctx->max_count)
return FILLDIR_ACTOR_STOP;
uctx->user_ids[uctx->count++] = uid;
return FILLDIR_ACTOR_CONTINUE;
}
static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count)
{
struct file *dir_file;
int ret;
*found_count = 0;
dir_file = ksu_filp_open_compat(USER_DATA_BASE_PATH, O_RDONLY, 0);
if (IS_ERR(dir_file)) {
pr_err("Cannot open user data base path: %s (%ld)\n", USER_DATA_BASE_PATH, PTR_ERR(dir_file));
return PTR_ERR(dir_file);
}
// Check the file system type of the base path
if (!is_path_for_kern_path(USER_DATA_BASE_PATH, dir_file->f_inode->i_sb)) {
pr_warn("User data base path is not safe for scanning, using primary user only\n");
filp_close(dir_file, NULL);
work_buf->user_ids_buffer[0] = 0;
*found_count = 1;
return 0;
}
struct user_id_ctx uctx = {
.ctx.actor = collect_user_ids,
.user_ids = work_buf->user_ids_buffer,
.count = 0,
.max_count = MAX_SUPPORTED_USERS,
.processed_count = 0
};
ret = iterate_dir(dir_file, &uctx.ctx);
filp_close(dir_file, NULL);
*found_count = uctx.count;
if (uctx.count > 0) {
pr_info("Found %zu active users: ", uctx.count);
for (size_t i = 0; i < uctx.count; i++) {
pr_cont("%u ", work_buf->user_ids_buffer[i]);
}
pr_cont("\n");
}
return ret;
}
static void scan_user_worker(struct work_struct *work)
{
struct scan_work_item *item = container_of(work, struct scan_work_item, work);
char path_buffer[DATA_PATH_LEN];
struct file *dir_file;
struct list_head deferred_paths;
int processed = 0;
INIT_LIST_HEAD(&deferred_paths);
snprintf(path_buffer, sizeof(path_buffer), "%s/%u", USER_DATA_BASE_PATH, item->user_id);
dir_file = ksu_filp_open_compat(path_buffer, O_RDONLY, 0);
if (IS_ERR(dir_file)) {
pr_debug("Cannot open user path: %s (%ld)\n", path_buffer, PTR_ERR(dir_file));
atomic_inc(item->total_error_count);
goto done;
}
// Check User Directory Security
if (!is_path_for_kern_path(path_buffer, dir_file->f_inode->i_sb)) {
pr_warn("User path %s is not safe for scanning, skipping\n", path_buffer);
filp_close(dir_file, NULL);
goto done;
}
struct user_scan_ctx scan_ctx = {
.deferred_paths = &deferred_paths,
.user_id = item->user_id,
.pkg_count = 0,
.error_count = 0,
.work_buf = NULL,
.processed_count = 0
};
struct user_dir_ctx uctx = {
.ctx.actor = scan_user_packages,
.scan_ctx = &scan_ctx
};
iterate_dir(dir_file, &uctx.ctx);
filp_close(dir_file, NULL);
mutex_lock(item->uid_list_mutex);
processed = process_deferred_paths(&deferred_paths, item->uid_list);
mutex_unlock(item->uid_list_mutex);
atomic_add(processed, item->total_pkg_count);
atomic_add(scan_ctx.error_count, item->total_error_count);
if (processed > 0 || scan_ctx.error_count > 0) {
pr_info("User %u: %d packages, %zu errors\n", item->user_id, processed, scan_ctx.error_count);
}
done:
if (atomic_dec_and_test(item->remaining_workers)) {
complete(item->work_completion);
}
kfree(item);
}
static int scan_secondary_users_apps(struct list_head *uid_list,
struct work_buffers *work_buf, size_t user_count,
size_t *total_pkg_count, size_t *total_error_count)
{
DECLARE_COMPLETION(work_completion);
DEFINE_MUTEX(uid_list_mutex);
atomic_t atomic_pkg_count = ATOMIC_INIT(0);
atomic_t atomic_error_count = ATOMIC_INIT(0);
atomic_t remaining_workers = ATOMIC_INIT(0);
int submitted_workers = 0;
if (!scan_workqueue) {
scan_workqueue = create_workqueue("ksu_scan");
if (!scan_workqueue) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
}
for (size_t i = 0; i < user_count; i++) {
// Skip the main user since it has already been scanned.
if (work_buf->user_ids_buffer[i] == 0)
continue;
struct scan_work_item *work_item = kzalloc(sizeof(struct scan_work_item), GFP_KERNEL);
if (!work_item) {
pr_err("Failed to allocate work item for user %u\n", work_buf->user_ids_buffer[i]);
continue;
}
INIT_WORK(&work_item->work, scan_user_worker);
work_item->user_id = work_buf->user_ids_buffer[i];
work_item->uid_list = uid_list;
work_item->uid_list_mutex = &uid_list_mutex;
work_item->total_pkg_count = &atomic_pkg_count;
work_item->total_error_count = &atomic_error_count;
work_item->work_completion = &work_completion;
work_item->remaining_workers = &remaining_workers;
atomic_inc(&remaining_workers);
if (queue_work(scan_workqueue, &work_item->work)) {
submitted_workers++;
} else {
atomic_dec(&remaining_workers);
kfree(work_item);
}
}
if (submitted_workers > 0) {
pr_info("Submitted %d concurrent scan workers\n", submitted_workers);
wait_for_completion(&work_completion);
}
*total_pkg_count = atomic_read(&atomic_pkg_count);
*total_error_count = atomic_read(&atomic_error_count);
return 0;
}
int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users)
{
if (!uid_list)
return -EINVAL;
if (in_interrupt() || in_atomic()) {
pr_err("Cannot scan user data in atomic context\n");
return -EINVAL;
}
struct work_buffers *work_buf = get_work_buffer();
if (!work_buf) {
pr_err("Failed to get work buffer\n");
return -ENOMEM;
}
// Scan primary user (User 0)
size_t primary_pkg_count, primary_error_count;
int ret = scan_primary_user_apps(uid_list, &primary_pkg_count, &primary_error_count, work_buf);
if (ret < 0 && primary_pkg_count == 0) {
pr_err("Primary user scan failed completely: %d\n", ret);
return ret;
}
// If scanning all users is not required, stop here.
if (!scan_all_users) {
pr_info("Scan completed (primary user only): %zu packages, %zu errors\n",
primary_pkg_count, primary_error_count);
return primary_pkg_count > 0 ? 0 : -ENOENT;
}
// Retrieve all active users
size_t active_users;
ret = get_all_active_users(work_buf, &active_users);
if (ret < 0 || active_users == 0) {
pr_warn("Failed to get active users or no additional users found, using primary user only: %d\n", ret);
return primary_pkg_count > 0 ? 0 : -ENOENT;
}
size_t secondary_pkg_count, secondary_error_count;
ret = scan_secondary_users_apps(uid_list, work_buf, active_users,
&secondary_pkg_count, &secondary_error_count);
size_t total_packages = primary_pkg_count + secondary_pkg_count;
size_t total_errors = primary_error_count + secondary_error_count;
if (total_errors > 0)
pr_warn("Scan completed with %zu errors\n", total_errors);
pr_info("Complete scan finished: %zu users, %zu total packages\n",
active_users, total_packages);
return total_packages > 0 ? 0 : -ENOENT;
}
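That completes the scanner. For context, a caller-side sketch of how the resulting uid_list is consumed and released, e.g. from track_throne_function(); the free loop is an assumption based on the list API, since that part of throne_tracker.c is outside these hunks:

/* Build the list, scan, read the results, then free every node. */
LIST_HEAD(uid_list);
struct uid_data *np, *tmp;
int ret = scan_user_data_for_uids(&uid_list, ksu_get_scan_all_users());

if (ret == 0) {
	list_for_each_entry(np, &uid_list, list)
		pr_info("pkg %s uid %u user %u\n", np->package, np->uid, np->user_id);
}

/* entries are kzalloc'd in process_deferred_paths(), so release them here */
list_for_each_entry_safe(np, tmp, &uid_list, list) {
	list_del(&np->list);
	kfree(np);
}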

View File

@@ -0,0 +1,96 @@
#ifndef _KSU_USER_DATA_SCANNER_H_
#define _KSU_USER_DATA_SCANNER_H_
#include <linux/list.h>
#include <linux/types.h>
#include <linux/fs.h>
#define USER_DATA_BASE_PATH "/data/user_de"
#define PRIMARY_USER_PATH "/data/user_de/0"
#define DATA_PATH_LEN 384 // 384 is enough for /data/user_de/{userid}/<package> and /data/app/<package>/base.apk
#define MAX_SUPPORTED_USERS 32 // Supports up to 32 users
#define SMALL_BUFFER_SIZE 64
#define SCHEDULE_INTERVAL 100
#define MAX_CONCURRENT_WORKERS 8
// https://docs.kernel.org/filesystems/porting.html
// filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true
#define FILLDIR_ACTOR_STOP false
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP -EINVAL
#endif
// Global work buffer to avoid stack allocation
struct work_buffers {
char path_buffer[DATA_PATH_LEN];
char package_buffer[KSU_MAX_PACKAGE_NAME];
char small_buffer[SMALL_BUFFER_SIZE];
uid_t user_ids_buffer[MAX_SUPPORTED_USERS];
};
struct work_buffers *get_work_buffer(void);
struct uid_data {
struct list_head list;
u32 uid;
char package[KSU_MAX_PACKAGE_NAME];
uid_t user_id;
};
struct deferred_path_info {
struct list_head list;
char path[DATA_PATH_LEN];
char package_name[KSU_MAX_PACKAGE_NAME];
uid_t user_id;
};
struct user_scan_ctx {
struct list_head *deferred_paths;
uid_t user_id;
size_t pkg_count;
size_t error_count;
struct work_buffers *work_buf;
size_t processed_count;
};
struct user_dir_ctx {
struct dir_context ctx;
struct user_scan_ctx *scan_ctx;
};
struct user_id_ctx {
struct dir_context ctx;
uid_t *user_ids;
size_t count;
size_t max_count;
size_t processed_count;
};
struct scan_work_item {
struct work_struct work;
uid_t user_id;
struct list_head *uid_list;
struct mutex *uid_list_mutex;
atomic_t *total_pkg_count;
atomic_t *total_error_count;
struct completion *work_completion;
atomic_t *remaining_workers;
};
int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users);
FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type);
FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
int namelen, loff_t off, u64 ino, unsigned int d_type);
static int process_deferred_paths(struct list_head *deferred_paths, struct list_head *uid_list);
static int scan_primary_user_apps(struct list_head *uid_list, size_t *pkg_count,
size_t *error_count, struct work_buffers *work_buf);
static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count);
static void scan_user_worker(struct work_struct *work);
#endif /* _KSU_USER_DATA_SCANNER_H_ */