pub struct Global<L = NoOpLifecycle> {
    // Hooks invoked around container lifecycle events (see
    // `ContainerAcquireGuard::start` in the acquire path).
    lifecycle: L,
    // Caps the number of concurrently held containers; initialized
    // with `container_limit` permits in `with_lifecycle`.
    container_semaphore: Arc<Semaphore>,
    // Caps the number of concurrently held processes; initialized
    // with `process_limit` permits in `with_lifecycle`.
    process_semaphore: Arc<Semaphore>,
    // Starts with zero permits. A permit is added when a caller wants
    // a container but cannot immediately acquire one, and is consumed
    // (and forgotten) by `container_requested`, signaling that an
    // idle holder should give its container up.
    container_request_semaphore: Arc<Semaphore>,
    // NOTE(review): derived from `SystemTime::now()` in
    // `with_lifecycle`; presumably a startup timestamp — confirm the
    // units (seconds vs. millis) against the omitted conversion code.
    start: u64,
    // Monotonically increasing id source; incremented with
    // `fetch_add(1, Ordering::SeqCst)` when tracking a container.
    id: AtomicU64,
}
@@ -136,6 +137,7 @@ where
136
137
pub fn with_lifecycle ( container_limit : usize , process_limit : usize , lifecycle : L ) -> Self {
137
138
let container_semaphore = Arc :: new ( Semaphore :: new ( container_limit) ) ;
138
139
let process_semaphore = Arc :: new ( Semaphore :: new ( process_limit) ) ;
140
+ let container_request_semaphore = Arc :: new ( Semaphore :: new ( 0 ) ) ;
139
141
140
142
let now = std:: time:: SystemTime :: now ( ) ;
141
143
let start = now
@@ -149,6 +151,7 @@ where
149
151
lifecycle,
150
152
container_semaphore,
151
153
process_semaphore,
154
+ container_request_semaphore,
152
155
start,
153
156
id,
154
157
}
@@ -163,13 +166,44 @@ where
163
166
let lifecycle = self . lifecycle . clone ( ) ;
164
167
let container_semaphore = self . container_semaphore . clone ( ) ;
165
168
let process_semaphore = self . process_semaphore . clone ( ) ;
169
+ let container_request_semaphore = self . container_request_semaphore . clone ( ) ;
166
170
let start = self . start ;
167
171
let id = self . id . fetch_add ( 1 , Ordering :: SeqCst ) ;
168
172
169
173
async move {
170
174
let guard = ContainerAcquireGuard :: start ( & lifecycle) ;
171
175
172
- let container_permit = container_semaphore. acquire_owned ( ) . await ;
176
+ // Attempt to acquire the container semaphore. If we don't
177
+ // immediately get it, notify the container request
178
+ // semaphore. Any idle-but-not-yet-exited connections
179
+ // should watch that semaphore to see if they should give
180
+ // up their container to allow someone else in.
181
+ //
182
+ // There *is* a race here: a container might naturally
183
+ // exit after we attempt to acquire the first time. In
184
+ // that case, we'd spuriously notify the request semaphore
185
+ // and a container might exit earlier than it needed
186
+ // to. However, this should be a transient issue and only
187
+ // occur when we are already at the upper bounds of our
188
+ // limits. In those cases, freeing an extra container or
189
+ // two shouldn't be the worst thing.
190
+ let container_permit = {
191
+ let fallback = {
192
+ let container_semaphore = container_semaphore. clone ( ) ;
193
+ async {
194
+ container_request_semaphore. add_permits ( 1 ) ;
195
+ container_semaphore. acquire_owned ( ) . await
196
+ }
197
+ } ;
198
+
199
+ tokio:: select! {
200
+ biased;
201
+
202
+ permit = container_semaphore. acquire_owned( ) => permit,
203
+ permit = fallback => permit,
204
+ }
205
+ } ;
206
+
173
207
let container_permit = guard. complete ( container_permit) ?;
174
208
175
209
let token = TrackContainer {
@@ -183,6 +217,23 @@ where
183
217
}
184
218
. boxed ( )
185
219
}
220
+
221
    /// Returns a future that resolves once some caller has signaled —
    /// by adding a permit to `container_request_semaphore` — that it
    /// wants a container but could not immediately acquire one.
    ///
    /// NOTE(review): per the comments on the acquire path, this is
    /// intended to be watched by idle-but-not-yet-exited connections
    /// so they can give up their container — confirm with callers.
    fn container_requested(&self) -> BoxFuture<'static, ()> {
        let container_request_semaphore = self.container_request_semaphore.clone();

        async move {
            let permit = container_request_semaphore
                .acquire()
                .await
                .expect("The semaphore is never closed");

            // We're now dealing with the request to return a
            // container so we discard the permit to prevent anyone
            // else from trying to handle it.
            //
            // `forget` (rather than dropping the guard, which would
            // return the permit to the semaphore) removes the permit
            // permanently, so each request is handled exactly once.
            permit.forget();
        }
        .boxed()
    }
186
237
}
187
238
188
239
impl < L > fmt:: Display for TrackContainer < L >
0 commit comments