@@ -9,7 +9,8 @@ struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker =
+		alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ -22,86 +23,33 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 unsigned int len)
+			 bool multicore, unsigned int len)
 {
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
-	if (!queue->worker) {
-		ptr_ring_cleanup(&queue->ring, NULL);
-		return -ENOMEM;
+	if (function) {
+		if (multicore) {
+			queue->worker = wg_packet_percpu_multicore_worker_alloc(
+				function, queue);
+			if (!queue->worker) {
+				ptr_ring_cleanup(&queue->ring, NULL);
+				return -ENOMEM;
+			}
+		} else {
+			INIT_WORK(&queue->work, function);
+		}
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
 {
-	free_percpu(queue->worker);
+	if (multicore)
+		free_percpu(queue->worker);
 	WARN_ON(!__ptr_ring_empty(&queue->ring));
 	ptr_ring_cleanup(&queue->ring, NULL);
 }
-
-#define NEXT(skb) ((skb)->prev)
-#define STUB(queue) ((struct sk_buff *)&queue->empty)
-
-void wg_prev_queue_init(struct prev_queue *queue)
-{
-	NEXT(STUB(queue)) = NULL;
-	queue->head = queue->tail = STUB(queue);
-	queue->peeked = NULL;
-	atomic_set(&queue->count, 0);
-	BUILD_BUG_ON(
-		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
-							offsetof(struct prev_queue, empty) ||
-		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
-							 offsetof(struct prev_queue, empty));
-}
-
-static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
-{
-	WRITE_ONCE(NEXT(skb), NULL);
-	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
-}
-
-bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
-{
-	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
-		return false;
-	__wg_prev_queue_enqueue(queue, skb);
-	return true;
-}
-
-struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
-{
-	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
-
-	if (tail == STUB(queue)) {
-		if (!next)
-			return NULL;
-		queue->tail = next;
-		tail = next;
-		next = smp_load_acquire(&NEXT(next));
-	}
-	if (next) {
-		queue->tail = next;
-		atomic_dec(&queue->count);
-		return tail;
-	}
-	if (tail != READ_ONCE(queue->head))
-		return NULL;
-	__wg_prev_queue_enqueue(queue, STUB(queue));
-	next = smp_load_acquire(&NEXT(tail));
-	if (next) {
-		queue->tail = next;
-		atomic_dec(&queue->count);
-		return tail;
-	}
-	return NULL;
-}
-
-#undef NEXT
-#undef STUB
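
Note (illustration, not part of the patch): with the signatures restored above, a caller that wants one worker per CPU passes multicore=true together with a work function, a caller that wants a single work item passes multicore=false so INIT_WORK() is used on the queue's own work member, and a NULL function skips worker setup entirely. The flag given to wg_packet_queue_free() must match the one used at init time, since only the multicore path allocates the per-CPU workers. A minimal sketch; example_worker, the two queue pointers, and the error handling are hypothetical placeholders, only wg_packet_queue_init(), wg_packet_queue_free(), and MAX_QUEUED_PACKETS come from the code above:

/* Hypothetical caller, for illustration only. */
static void example_worker(struct work_struct *work)
{
	/* Drain packets queued behind the crypt_queue here. */
}

static int example_queues_init(struct crypt_queue *mc_queue,
			       struct crypt_queue *sc_queue)
{
	int ret;

	/* Per-CPU workers service this queue (multicore=true path). */
	ret = wg_packet_queue_init(mc_queue, example_worker, true,
				   MAX_QUEUED_PACKETS);
	if (ret)
		return ret;

	/* Single work item: the INIT_WORK(&queue->work, ...) branch. */
	ret = wg_packet_queue_init(sc_queue, example_worker, false,
				   MAX_QUEUED_PACKETS);
	if (ret) {
		wg_packet_queue_free(mc_queue, true);
		return ret;
	}
	return 0;
}

static void example_queues_free(struct crypt_queue *mc_queue,
				struct crypt_queue *sc_queue)
{
	/* Flags must mirror the ones used during init. */
	wg_packet_queue_free(sc_queue, false);
	wg_packet_queue_free(mc_queue, true);
}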