kernel: Optimizing thread scheduling during user scans

Kernel panic dump excerpt; the dump reason buffer names `throne_tracker` (PID 5074) as the running process:

```
[   23.379244][ T5074] ufshcd-qcom 1d84000.ufshc: ............. ufs dump complete ..........
[   23.379263][ T5074] dump-reason-buffer-size: 256
[   23.379267][ T5074] dump-reason-pidbuffer:PID: 5074, Process Name: throne_tracker
[   23.379295][ T5074] qcom_q6v5_pas a3380000.remoteproc-soccp: waking SOCCP from panic path
[   23.379455][ T5074] CPU0 next event is 23368000000
[   23.379459][ T5074] CPU5 next event is 23368000000
[   23.589323][ T5074] Kernel Offset: 0x1738a00000 from 0xffffffc080000000
[   23.589325][ T5074] PHYS_OFFSET: 0x80000000
[   23.589326][ T5074] CPU features: 0x000000,00000000,70024f43,95fffea7
[   23.589328][ T5074] Memory Limit: none
[   23.589490][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] host pm_state:M2 dev_state:M2 ee:MISSION MODE
[   23.589505][ T5074] mhi mhi_110e_00.01.00: [E][mhi_debug_reg_dump] device ee: MISSION MODE dev_state: M2
```
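
The patch below keeps these scans from monopolizing a CPU: each filldir callback counts processed directory entries and yields with cond_resched() every SCHEDULE_INTERVAL entries, and the outer per-user/per-path loops yield once per iteration. A minimal, self-contained sketch of the pattern; `struct scan_state` and `scan_entry` are illustrative names, not taken from the patch:

```c
#include <linux/sched.h>

#define SCHEDULE_INTERVAL 100 /* entries between voluntary yields, as in the patch */

/* Illustrative per-scan state; the patch adds an equivalent
 * processed_count field to its existing context structs. */
struct scan_state {
    size_t processed_count;
};

/* Simplified directory-entry callback: periodically offer the CPU back to
 * the scheduler so a long walk over /data/app cannot hog a core. */
static bool scan_entry(struct scan_state *st, const char *name, int namelen)
{
    if (++st->processed_count % SCHEDULE_INTERVAL == 0)
        cond_resched(); /* voluntary preemption point */

    /* ... per-entry work elided ... */
    return true; /* keep iterating */
}
```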

Signed-off-by: ShirkNeko <109797057+ShirkNeko@users.noreply.github.com>

@@ -19,6 +19,7 @@
 #include <linux/kthread.h>
+#include <linux/sched.h>
 #include <linux/delay.h>
 uid_t ksu_manager_uid = KSU_INVALID_UID;
@@ -29,6 +30,7 @@ static struct task_struct *throne_thread;
 #define MAX_SUPPORTED_USERS 32 // Supports up to 32 users
 #define DATA_PATH_LEN 384 // 384 is enough for /data/app/<package>/base.apk and /data/user_de/{userid}/<package>
 #define SMALL_BUFFER_SIZE 64
+#define SCHEDULE_INTERVAL 100
 // Global work buffer to avoid stack allocation
 struct work_buffers {
@@ -57,6 +59,7 @@ struct user_scan_ctx {
     size_t pkg_count;
     size_t error_count;
     struct work_buffers *work_buf; // Passing the work buffer
+    size_t processed_count;
 };
 struct user_dir_ctx {
@@ -69,6 +72,7 @@ struct user_id_ctx {
     uid_t *user_ids;
     size_t count;
     size_t max_count;
+    size_t processed_count;
 };
 struct data_path {
@@ -94,6 +98,7 @@ struct my_dir_context {
     int *stop;
     bool found_dynamic_manager;
     struct work_buffers *work_buf; // Passing the work buffer
+    size_t processed_count;
 };
 // https://docs.kernel.org/filesystems/porting.html
 // filldir_t (readdir callbacks) calling conventions have changed. Instead of returning 0 or -E... it returns bool now. false means "no more" (as -E... used to) and true - "keep going" (as 0 in old calling conventions). Rationale: callers never looked at specific -E... values anyway. -> iterate_shared() instances require no changes at all, all filldir_t ones in the tree converted.
@@ -234,6 +239,11 @@ FILLDIR_RETURN_TYPE collect_user_ids(struct dir_context *ctx, const char *name,
 {
     struct user_id_ctx *uctx = container_of(ctx, struct user_id_ctx, ctx);
+    uctx->processed_count++;
+    if (uctx->processed_count % SCHEDULE_INTERVAL == 0) {
+        cond_resched();
+    }
     if (d_type != DT_DIR || namelen <= 0)
         return FILLDIR_ACTOR_CONTINUE;
     if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
@@ -271,7 +281,8 @@ static int get_all_active_users(struct work_buffers *work_buf, size_t *found_cou
         .ctx.actor = collect_user_ids,
         .user_ids = work_buf->user_ids_buffer,
         .count = 0,
-        .max_count = MAX_SUPPORTED_USERS
+        .max_count = MAX_SUPPORTED_USERS,
+        .processed_count = 0
     };
     ret = iterate_dir(dir_file, &uctx.ctx);
@@ -298,6 +309,12 @@ FILLDIR_RETURN_TYPE scan_user_packages(struct dir_context *ctx, const char *name
     if (!scan_ctx || !scan_ctx->uid_list)
         return FILLDIR_ACTOR_STOP;
+    scan_ctx->processed_count++;
+    if (scan_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
+        cond_resched();
+    }
     if (d_type != DT_DIR || namelen <= 0)
         return FILLDIR_ACTOR_CONTINUE;
     if (name[0] == '.' && (namelen == 1 || (namelen == 2 && name[1] == '.')))
@@ -374,7 +391,7 @@ basically no mask and flags for =< 4.10
     list_add_tail(&uid_entry->list, scan_ctx->uid_list);
     scan_ctx->pkg_count++;
-    pr_debug("Package: %s, UID: %u, User: %u\n", uid_entry->package, uid, scan_ctx->user_id);
+    pr_info("Package: %s, UID: %u, User: %u\n", uid_entry->package, uid, scan_ctx->user_id);
     return FILLDIR_ACTOR_CONTINUE;
 }
@@ -407,7 +424,8 @@ static int scan_secondary_users_apps(struct list_head *uid_list,
             .user_id = work_buf->user_ids_buffer[i],
             .pkg_count = 0,
             .error_count = 0,
-            .work_buf = work_buf
+            .work_buf = work_buf,
+            .processed_count = 0
         };
         struct user_dir_ctx uctx = {
@@ -424,7 +442,9 @@ static int scan_secondary_users_apps(struct list_head *uid_list,
         if (scan_ctx.pkg_count > 0 || scan_ctx.error_count > 0)
             pr_info("User %u: %zu packages, %zu errors\n",
                 work_buf->user_ids_buffer[i], scan_ctx.pkg_count, scan_ctx.error_count);
         }
+        cond_resched();
     }
     return ret;
 }
@@ -490,6 +510,12 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
         pr_err("Invalid context\n");
         return FILLDIR_ACTOR_STOP;
     }
+    my_ctx->processed_count++;
+    if (my_ctx->processed_count % SCHEDULE_INTERVAL == 0) {
+        cond_resched();
+    }
     if (my_ctx->stop && *my_ctx->stop) {
         pr_info("Stop searching\n");
         return FILLDIR_ACTOR_STOP;
@@ -513,7 +539,7 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
     if (d_type == DT_DIR && my_ctx->depth > 0 &&
         (my_ctx->stop && !*my_ctx->stop)) {
-        struct data_path *data = kmalloc(sizeof(struct data_path), GFP_ATOMIC);
+        struct data_path *data = kmalloc(sizeof(struct data_path), GFP_KERNEL);
         if (!data) {
             pr_err("Failed to allocate memory for %s\n", work_buf->path_buffer);
@@ -551,7 +577,7 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
             crown_manager(work_buf->path_buffer, my_ctx->private_data,
                 signature_index, work_buf);
-            struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
+            struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_KERNEL);
             if (apk_data) {
                 apk_data->hash = hash;
                 apk_data->exists = true;
@@ -573,7 +599,7 @@ FILLDIR_RETURN_TYPE my_actor(struct dir_context *ctx, const char *name,
             }
         }
     } else {
-        struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_ATOMIC);
+        struct apk_path_hash *apk_data = kmalloc(sizeof(struct apk_path_hash), GFP_KERNEL);
         if (apk_data) {
             apk_data->hash = hash;
             apk_data->exists = true;
@@ -625,7 +651,8 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
                 .depth = pos->depth,
                 .stop = &stop,
                 .found_dynamic_manager = false,
-                .work_buf = work_buf };
+                .work_buf = work_buf,
+                .processed_count = 0 };
             struct file *file;
             if (!stop) {
@@ -659,12 +686,16 @@ void search_manager(const char *path, int depth, struct list_head *uid_data)
                 if (ctx.found_dynamic_manager) {
                     found_dynamic_manager = true;
                 }
+                cond_resched();
             }
 skip_iterate:
             list_del(&pos->list);
             if (pos != &data)
                 kfree(pos);
         }
+        cond_resched();
     }
     // clear apk_path_hash_list unconditionally
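
Background on the FILLDIR_RETURN_TYPE / FILLDIR_ACTOR_* names used throughout the diff: they paper over the filldir_t calling-convention change quoted in the porting note above (the callback now returns bool instead of 0 / -E...). A sketch of how such a compat shim is commonly defined; the version cutoff and exact values here are assumptions, not copied from this tree:

```c
#include <linux/errno.h>
#include <linux/version.h>

/* Assumed compat shim; check the tree's real definitions before relying on it. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#define FILLDIR_RETURN_TYPE bool
#define FILLDIR_ACTOR_CONTINUE true    /* "keep going" */
#define FILLDIR_ACTOR_STOP false       /* "no more entries" */
#else
#define FILLDIR_RETURN_TYPE int
#define FILLDIR_ACTOR_CONTINUE 0
#define FILLDIR_ACTOR_STOP (-EINVAL)
#endif
```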