@@ -726,10 +726,12 @@ int tracepoint__sched__sched_process_fork(struct bpf_raw_tracepoint_args *ctx)
    return 0;
}

- // number of iterations - value that the verifier was seen to cope with - the higher, the better
- #define MAX_NUM_MODULES 440
- #define HISTORY_SCAN_FAILURE 0
- #define HISTORY_SCAN_SUCCESSFUL 1
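+ // Iteration bounds: each tail call visits at most MOD_TREE_LOOP_ITERATIONS
+ // nodes, and the walk re-enters itself at most MOD_TREE_LOOP_DEPTH times,
+ // so up to 240 * 14 = 3360 tree nodes are examined in total (values
+ // presumably sized to what the verifier copes with, as with MAX_NUM_MODULES
+ // in the comment removed above).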
+ #define MAX_NUM_MODULES 440
+ #define MAX_MODULES_MAP_ENTRIES (2 * MAX_NUM_MODULES)
+ #define MOD_TREE_LOOP_ITERATIONS 240
+ #define MOD_TREE_LOOP_DEPTH 14
+ #define HISTORY_SCAN_FAILURE 0
+ #define HISTORY_SCAN_SUCCESSFUL 1

enum
{
@@ -747,7 +749,7 @@

struct modules_map {
    __uint(type, BPF_MAP_TYPE_HASH);
-   __uint(max_entries, MAX_NUM_MODULES);
+   __uint(max_entries, MAX_MODULES_MAP_ENTRIES);
    __type(key, u64);
    __type(value, kernel_module_t);
} modules_map SEC(".maps");
@@ -763,6 +765,21 @@ struct new_module_map {

typedef struct new_module_map new_module_map_t;
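+ // Walk state carried across bpf_tail_call() invocations: tail calls do not
+ // preserve the stack, so `curr` (next node), `idx` (latch-tree index, seq & 1)
+ // and `iteration_num` (tail calls so far) live in a one-entry per-CPU array.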
+ typedef struct module_context_args {
+     struct rb_node *curr;
+     int iteration_num;
+     int idx;
+ } module_context_args_t;
+
+ struct module_context_map {
+     __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+     __uint(max_entries, 1);
+     __type(key, u32);
+     __type(value, module_context_args_t);
+ } module_context_map SEC(".maps");
+
+ typedef struct module_context_map module_context_map_t;
+
// We only care for modules that got deleted or inserted between our scan and if
// we detected something suspicious. Since it's a very small time frame, it's
// not likely that a large amount of modules will be deleted. Instead of saving
@@ -786,6 +803,7 @@ u64 last_module_insert_time = 0;
bool hidden_old_mod_scan_done = false;
static const int HID_MOD_RACE_CONDITION = -1;
static const int HID_MOD_UNCOMPLETED_ITERATIONS = -2;
+ static const int HID_MOD_COMPLETED_ITERATIONS = 0;
static const int HID_MOD_MEM_ZEROED = -3;
static const int MOD_HIDDEN = 1;
static const int MOD_NOT_HIDDEN = 0;
@@ -841,7 +859,6 @@ statfunc int init_shown_modules()
        if (&pos->list == head) {
            return 0;
        }
-
        bpf_map_update_elem(&modules_map, &pos, &ker_mod, BPF_ANY);
    }

@@ -933,15 +950,34 @@ statfunc struct latch_tree_node *__lt_from_rb(struct rb_node *node, int idx)
    return container_of(node, struct latch_tree_node, node[idx]);
}

- statfunc int walk_mod_tree(program_data_t *p, struct rb_node *root, int idx)
+ struct mod_tree_root {
+     struct latch_tree_root root;
+ };
+
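+ // One step of the module rb-tree walk. The program tail-calls itself until
+ // the walk finishes or MOD_TREE_LOOP_DEPTH is reached, keeping each step
+ // small enough for the verifier.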
+ SEC("uprobe/lkm_seeker_modtree_loop_tail")
+ int lkm_seeker_modtree_loop(struct pt_regs *ctx)
{
+   program_data_t p = {};
+   if (!init_tailcall_program_data(&p, ctx))
+       return -1;
+
    struct latch_tree_node *ltn;
    struct module *mod;
-   struct rb_node *curr = root;
    u32 flags = MOD_TREE;

+   int key = 0;
+   module_context_args_t *module_ctx_args = bpf_map_lookup_elem(&module_context_map, &key);
+   if (module_ctx_args == NULL)
+       return -1;
+
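+   // Resume from the walk state saved by the previous step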
+   struct rb_node *curr = module_ctx_args->curr;
+   int idx = module_ctx_args->idx;
+   int iteration_num = module_ctx_args->iteration_num;
+
+   int loop_result = HID_MOD_UNCOMPLETED_ITERATIONS;
+
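+   // Iterative traversal using a map-backed queue as the explicit stack,
+   // since BPF programs cannot recurse.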
    #pragma unroll
-   for (int i = 0; i < MAX_NUM_MODULES; i++) {
+   for (int i = 0; i < MOD_TREE_LOOP_ITERATIONS; i++) {
        if (curr != NULL) {
            rb_node_t rb_nod = {.node = curr};
            bpf_map_push_elem(&walk_mod_tree_queue, &rb_nod, BPF_EXIST);
@@ -950,17 +986,19 @@ statfunc int walk_mod_tree(program_data_t *p, struct rb_node *root, int idx)
        } else {
            rb_node_t rb_nod;
            if (bpf_map_pop_elem(&walk_mod_tree_queue, &rb_nod) != 0) {
-               return 0; // Finished iterating
+               loop_result = HID_MOD_COMPLETED_ITERATIONS;
+               break;
            } else {
                curr = rb_nod.node;
                ltn = __lt_from_rb(curr, idx);
                mod = BPF_CORE_READ(container_of(ltn, struct mod_tree_node, node), mod);

                int ret = is_hidden((u64) mod);
                if (ret == MOD_HIDDEN) {
-                   lkm_seeker_send_to_userspace(mod, &flags, p);
+                   lkm_seeker_send_to_userspace(mod, &flags, &p);
                } else if (ret == HID_MOD_RACE_CONDITION) {
-                   return ret;
+                   loop_result = HID_MOD_RACE_CONDITION;
+                   break;
                }

                /* We have visited the node and its left subtree.
@@ -970,12 +1008,27 @@ statfunc int walk_mod_tree(program_data_t *p, struct rb_node *root, int idx)
        }
    }

-   return HID_MOD_UNCOMPLETED_ITERATIONS;
- }
+   iteration_num++;

- struct mod_tree_root {
-     struct latch_tree_root root;
- };
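+   // Decide how this step ended: a drained queue means the scan succeeded;
+   // a detected race or an exhausted tail-call budget is reported as failure.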
+   if (loop_result == HID_MOD_COMPLETED_ITERATIONS) {
+       flags = HISTORY_SCAN_FINISHED;
+       lkm_seeker_send_to_userspace((struct module *) HISTORY_SCAN_SUCCESSFUL, &flags, &p);
+       bpf_tail_call(ctx, &prog_array, TAIL_HIDDEN_KERNEL_MODULE_PROC);
+   } else if (loop_result == HID_MOD_RACE_CONDITION || iteration_num == MOD_TREE_LOOP_DEPTH) {
+       flags = HISTORY_SCAN_FINISHED;
+       tracee_log(ctx, BPF_LOG_LVL_WARN, BPF_LOG_ID_HID_KER_MOD, loop_result ^ iteration_num);
+       lkm_seeker_send_to_userspace((struct module *) HISTORY_SCAN_FAILURE, &flags, &p);
+       bpf_tail_call(ctx, &prog_array, TAIL_HIDDEN_KERNEL_MODULE_PROC);
+   }
+
+   // Update the context args for the next tail call
+   module_ctx_args->iteration_num = iteration_num;
+   module_ctx_args->curr = curr;
+
+   bpf_tail_call(ctx, &prog_array, TAIL_HIDDEN_KERNEL_MODULE_MODTREE_LOOP);
+
+   return -1;
+ }

statfunc int find_modules_from_mod_tree(program_data_t *p)
{
@@ -989,9 +1042,16 @@ statfunc int find_modules_from_mod_tree(program_data_t *p)
        seq = BPF_CORE_READ(m_tree, root.seq.seqcount.sequence); // version >= v5.10
    }

-   struct rb_node *node = BPF_CORE_READ(m_tree, root.tree[seq & 1].rb_node);
+   int idx = seq & 1;
+   struct rb_node *root = BPF_CORE_READ(m_tree, root.tree[idx].rb_node);
+   module_context_args_t module_ctx_args = {.idx = idx, .iteration_num = 0, .curr = root};
+
+   int key = 0;
+   bpf_map_update_elem(&module_context_map, &key, &module_ctx_args, BPF_ANY);

-   return walk_mod_tree(p, node, seq & 1);
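+   // Hand off to the tail-call loop; the scan result is now reported from
+   // inside lkm_seeker_modtree_loop, and -1 is reached only if the tail
+   // call itself fails.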
+   bpf_tail_call(p->ctx, &prog_array, TAIL_HIDDEN_KERNEL_MODULE_MODTREE_LOOP);
+
+   return -1;
}

static __always_inline u64 check_new_mods_only(program_data_t *p)
@@ -1221,19 +1281,9 @@ int lkm_seeker_mod_tree_tail(struct pt_regs *ctx)

    // This method is efficient only when the kernel is compiled with
    // CONFIG_MODULES_TREE_LOOKUP=y
-   int ret = find_modules_from_mod_tree(&p);
-   if (ret < 0) {
-       tracee_log(ctx, BPF_LOG_LVL_WARN, BPF_LOG_ID_HID_KER_MOD, ret);
-       lkm_seeker_send_to_userspace(
-           (struct module *) HISTORY_SCAN_FAILURE, &flags, &p); // Report failure of history scan
-       return -1;
-   }
-
-   // Report to userspace that the history scan finished successfully
-   lkm_seeker_send_to_userspace((struct module *) HISTORY_SCAN_SUCCESSFUL, &flags, &p);
+   find_modules_from_mod_tree(&p);
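+   // Success or failure of the history scan is now reported from within
+   // lkm_seeker_modtree_loop rather than here.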

    bpf_tail_call(ctx, &prog_array, TAIL_HIDDEN_KERNEL_MODULE_PROC);
-
    return -1;
}