kernel: Add support for concurrent scanning of user data apps

Author:  ShirkNeko
Date:    2025-09-16 18:16:19 +08:00
Parent:  335ddc4432
Commit:  60d122c01b

2 changed files with 126 additions and 61 deletions
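
Secondary users were previously scanned one at a time inside scan_secondary_users_apps(). The per-user scan now runs as a scan_user_worker() work item: one item is allocated and queued on a dedicated "ksu_scan" workqueue for each secondary user, package and error totals are accumulated in atomic counters, and insertions into the shared uid_list are serialized with a mutex. The submitting thread waits on a completion signaled by the last worker to finish; each worker frees its own work item.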


@@ -11,6 +11,10 @@
 #include <linux/mount.h>
 #include <linux/magic.h>
 #include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
 #include "klog.h"
 #include "ksu.h"
@@ -20,6 +24,8 @@
#define KERN_PATH_TIMEOUT_MS 100 #define KERN_PATH_TIMEOUT_MS 100
#define MAX_FUSE_CHECK_RETRIES 3 #define MAX_FUSE_CHECK_RETRIES 3
static struct workqueue_struct *scan_workqueue;
struct work_buffers *get_work_buffer(void) struct work_buffers *get_work_buffer(void)
{ {
static struct work_buffers global_buffer; static struct work_buffers global_buffer;
@@ -172,7 +178,6 @@ FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name
         return FILLDIR_ACTOR_CONTINUE;
 }
-
 static int process_deferred_paths(struct list_head *deferred_paths, struct list_head *uid_list)
 {
         struct deferred_path_info *path_info, *n;
@@ -401,51 +406,39 @@ static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count
         return ret;
 }
 
-static int scan_secondary_users_apps(struct list_head *uid_list,
-                                     struct work_buffers *work_buf, size_t user_count,
-                                     size_t *total_pkg_count, size_t *total_error_count)
+static void scan_user_worker(struct work_struct *work)
 {
-        int ret = 0;
-        *total_pkg_count = *total_error_count = 0;
-
-        for (size_t i = 0; i < user_count; i++) {
-                if (fatal_signal_pending(current)) {
-                        pr_info("Fatal signal received, stopping secondary user scan\n");
-                        break;
-                }
-
-                // Skip the main user since it was already scanned in the first step
-                if (work_buf->user_ids_buffer[i] == 0)
-                        continue;
-
+        struct scan_work_item *item = container_of(work, struct scan_work_item, work);
+        char path_buffer[DATA_PATH_LEN];
         struct file *dir_file;
         struct list_head deferred_paths;
+        int processed = 0;
 
         INIT_LIST_HEAD(&deferred_paths);
-        snprintf(work_buf->path_buffer, sizeof(work_buf->path_buffer),
-                 "%s/%u", USER_DATA_BASE_PATH, work_buf->user_ids_buffer[i]);
+        snprintf(path_buffer, sizeof(path_buffer), "%s/%u", USER_DATA_BASE_PATH, item->user_id);
 
-        dir_file = ksu_filp_open_compat(work_buf->path_buffer, O_RDONLY, 0);
+        dir_file = ksu_filp_open_compat(path_buffer, O_RDONLY, 0);
         if (IS_ERR(dir_file)) {
-                pr_debug("Cannot open user path: %s (%ld)\n", work_buf->path_buffer, PTR_ERR(dir_file));
-                (*total_error_count)++;
-                continue;
+                pr_debug("Cannot open user path: %s (%ld)\n", path_buffer, PTR_ERR(dir_file));
+                atomic_inc(item->total_error_count);
+                goto done;
         }
 
         // Check the file system type of the user directory
         if (is_dangerous_fs_magic(dir_file->f_inode->i_sb->s_magic)) {
                 pr_info("User path %s is on dangerous filesystem (magic=0x%lx), skipping\n",
-                        work_buf->path_buffer, dir_file->f_inode->i_sb->s_magic);
+                        path_buffer, dir_file->f_inode->i_sb->s_magic);
                 filp_close(dir_file, NULL);
-                continue;
+                goto done;
         }
 
         struct user_scan_ctx scan_ctx = {
                 .deferred_paths = &deferred_paths,
-                .user_id = work_buf->user_ids_buffer[i],
+                .user_id = item->user_id,
                 .pkg_count = 0,
                 .error_count = 0,
-                .work_buf = work_buf,
+                .work_buf = NULL,
                 .processed_count = 0
         };
@@ -454,22 +447,84 @@
                 .scan_ctx = &scan_ctx
         };
 
-        ret = iterate_dir(dir_file, &uctx.ctx);
+        iterate_dir(dir_file, &uctx.ctx);
         filp_close(dir_file, NULL);
 
-        int processed = process_deferred_paths(&deferred_paths, uid_list);
+        mutex_lock(item->uid_list_mutex);
+        processed = process_deferred_paths(&deferred_paths, item->uid_list);
+        mutex_unlock(item->uid_list_mutex);
 
-        *total_pkg_count += processed;
-        *total_error_count += scan_ctx.error_count;
+        atomic_add(processed, item->total_pkg_count);
+        atomic_add(scan_ctx.error_count, item->total_error_count);
 
-        if (processed > 0 || scan_ctx.error_count > 0)
-                pr_info("User %u: %d packages, %zu errors\n",
-                        work_buf->user_ids_buffer[i], processed, scan_ctx.error_count);
-
-                cond_resched();
-        }
-
-        return ret;
+        if (processed > 0 || scan_ctx.error_count > 0) {
+                pr_info("User %u: %d packages, %zu errors\n", item->user_id, processed, scan_ctx.error_count);
+        }
+
+done:
+        if (atomic_dec_and_test(item->remaining_workers)) {
+                complete(item->work_completion);
+        }
+        kfree(item);
+}
+
+static int scan_secondary_users_apps(struct list_head *uid_list,
+                                     struct work_buffers *work_buf, size_t user_count,
+                                     size_t *total_pkg_count, size_t *total_error_count)
+{
+        DECLARE_COMPLETION(work_completion);
+        DEFINE_MUTEX(uid_list_mutex);
+        atomic_t atomic_pkg_count = ATOMIC_INIT(0);
+        atomic_t atomic_error_count = ATOMIC_INIT(0);
+        atomic_t remaining_workers = ATOMIC_INIT(0);
+        int submitted_workers = 0;
+
+        if (!scan_workqueue) {
+                scan_workqueue = create_workqueue("ksu_scan");
+                if (!scan_workqueue) {
+                        pr_err("Failed to create workqueue\n");
+                        return -ENOMEM;
+                }
+        }
+
+        for (size_t i = 0; i < user_count; i++) {
+                // Skip the main user since it was already scanned in the first step
+                if (work_buf->user_ids_buffer[i] == 0)
+                        continue;
+
+                struct scan_work_item *work_item = kzalloc(sizeof(struct scan_work_item), GFP_KERNEL);
+                if (!work_item) {
+                        pr_err("Failed to allocate work item for user %u\n", work_buf->user_ids_buffer[i]);
+                        continue;
+                }
+
+                INIT_WORK(&work_item->work, scan_user_worker);
+                work_item->user_id = work_buf->user_ids_buffer[i];
+                work_item->uid_list = uid_list;
+                work_item->uid_list_mutex = &uid_list_mutex;
+                work_item->total_pkg_count = &atomic_pkg_count;
+                work_item->total_error_count = &atomic_error_count;
+                work_item->work_completion = &work_completion;
+                work_item->remaining_workers = &remaining_workers;
+
+                atomic_inc(&remaining_workers);
+                if (queue_work(scan_workqueue, &work_item->work)) {
+                        submitted_workers++;
+                } else {
+                        atomic_dec(&remaining_workers);
+                        kfree(work_item);
+                }
+        }
+
+        if (submitted_workers > 0) {
+                pr_info("Submitted %d concurrent scan workers\n", submitted_workers);
+                wait_for_completion(&work_completion);
+        }
+
+        *total_pkg_count = atomic_read(&atomic_pkg_count);
+        *total_error_count = atomic_read(&atomic_error_count);
+        return 0;
 }
 
 int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users)
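
For reference, the join pattern the two new functions implement (fan work items out to a workqueue, track them with a shared atomic counter, let the last finisher signal a completion, and have every worker free its own item) reduces to the skeleton below. This is a sketch, not code from the commit: the demo_* names are illustrative, a kernel build environment is assumed, and the bias reference held by the submitter is a deliberate difference that closes the window in which a worker finishing before the loop ends could signal the on-stack completion early.

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_item {
        struct work_struct work;
        atomic_t *remaining;        /* in-flight count, shared by all items */
        struct completion *done;    /* signaled once, by the last finisher */
};

static void demo_worker(struct work_struct *work)
{
        struct demo_item *item = container_of(work, struct demo_item, work);

        /* ... per-item payload (scanning one user, in the commit) ... */

        /* Whoever drops the count to zero wakes the submitter. Nothing may
         * touch *remaining or *done after complete(): both live on the
         * submitter's stack. Freeing the heap item afterwards is fine. */
        if (atomic_dec_and_test(item->remaining))
                complete(item->done);
        kfree(item);                /* each worker frees its own item */
}

static void demo_fan_out(struct workqueue_struct *wq, int n)
{
        DECLARE_COMPLETION_ONSTACK(done);
        /* Bias of 1: the submitter keeps a reference while still queuing,
         * so the counter cannot hit zero before submission is finished. */
        atomic_t remaining = ATOMIC_INIT(1);

        for (int i = 0; i < n; i++) {
                struct demo_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

                if (!item)
                        continue;   /* skip this unit, as the commit does */

                INIT_WORK(&item->work, demo_worker);
                item->remaining = &remaining;
                item->done = &done;

                atomic_inc(&remaining);          /* count before queuing */
                if (!queue_work(wq, &item->work)) {
                        atomic_dec(&remaining);  /* not queued: roll back */
                        kfree(item);
                }
        }

        /* Drop the bias; wait only if at least one worker is still out. */
        if (!atomic_dec_and_test(&remaining))
                wait_for_completion(&done);
}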


@@ -11,6 +11,7 @@
 #define MAX_SUPPORTED_USERS 32 // Supports up to 32 users
 #define SMALL_BUFFER_SIZE 64
 #define SCHEDULE_INTERVAL 100
+#define MAX_CONCURRENT_WORKERS 8
 
 // https://docs.kernel.org/filesystems/porting.html
 // filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
@@ -70,6 +71,17 @@ struct user_id_ctx {
         size_t processed_count;
 };
 
+struct scan_work_item {
+        struct work_struct work;
+        uid_t user_id;
+        struct list_head *uid_list;
+        struct mutex *uid_list_mutex;
+        atomic_t *total_pkg_count;
+        atomic_t *total_error_count;
+        struct completion *work_completion;
+        atomic_t *remaining_workers;
+};
+
 int scan_user_data_for_uids(struct list_head *uid_list, bool scan_all_users);
 
 FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name,
                                        int namelen, loff_t off, u64 ino, unsigned int d_type);
@@ -79,8 +91,6 @@ static int process_deferred_paths(struct list_head *deferred_paths, struct list_
 static int scan_primary_user_apps(struct list_head *uid_list, size_t *pkg_count,
                                   size_t *error_count, struct work_buffers *work_buf);
 static int get_all_active_users(struct work_buffers *work_buf, size_t *found_count);
-static int scan_secondary_users_apps(struct list_head *uid_list, struct work_buffers *work_buf,
-                                     size_t user_count, size_t *total_pkg_count,
-                                     size_t *total_error_count);
+static void scan_user_worker(struct work_struct *work);
 
 #endif /* _KSU_USER_DATA_SCANNER_H_ */
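
The public entry point is unchanged by this commit. A hypothetical call site, sketched on the assumption that scan_all_users selects whether the concurrent secondary-user pass above runs in addition to the primary-user scan:

        LIST_HEAD(uid_list);        /* presumably filled with discovered UIDs */
        int err = scan_user_data_for_uids(&uid_list, true);

        if (err)
                pr_err("user data scan failed: %d\n", err);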