Commit 16b9e93

wqueue: wqueue remove csection

reason: We decouple semcount from the business logic by using an
independent counting variable, which allows us to remove critical
sections in many cases.

Signed-off-by: hujun5 <[email protected]>
1 parent 904b6ff commit 16b9e93
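
The gist of the change, per the reason above, is that the work queue no longer infers "are there waiters?" by reading the semaphore's internal count with nxsem_get_value(); it keeps its own wait_count, touched only while holding a new spinlock (g_wqueue_lock, g_work_notifier_lock), so enter_critical_section() can be dropped. The sketch below is a host-side illustration of that pattern using plain POSIX threads; it is not NuttX code, and every name in it (g_lock, g_wait_count, queue_work, NITEMS) is an illustrative stand-in for the fields the diffs in kwork_queue.c and kwork_thread.c actually touch.

/* Host-side illustration (POSIX threads, not NuttX) of tracking waiters
 * with an explicit counter instead of peeking at the semaphore count.
 */

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NITEMS 3

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for g_wqueue_lock */
static sem_t g_sem;                                         /* stand-in for wqueue->sem */
static int g_wait_count;                                    /* stand-in for wqueue->wait_count */
static int g_pending;                                       /* stand-in for the queued work count */

static void queue_work(void)
{
  pthread_mutex_lock(&g_lock);
  g_pending++;
  if (g_wait_count < 0)       /* A worker is blocked on the semaphore */
    {
      g_wait_count++;         /* Account for the wakeup we are about to post */
      sem_post(&g_sem);
    }

  pthread_mutex_unlock(&g_lock);
}

static void *worker(void *arg)
{
  int done = 0;

  while (done < NITEMS)
    {
      pthread_mutex_lock(&g_lock);
      while (g_pending == 0)
        {
          g_wait_count--;     /* Record the waiter, then block outside the lock */
          pthread_mutex_unlock(&g_lock);
          sem_wait(&g_sem);
          pthread_mutex_lock(&g_lock);
        }

      g_pending--;
      pthread_mutex_unlock(&g_lock);
      printf("processed item %d\n", ++done);
    }

  return NULL;
}

int main(void)
{
  pthread_t tid;
  int i;

  sem_init(&g_sem, 0, 0);
  pthread_create(&tid, NULL, worker, NULL);

  for (i = 0; i < NITEMS; i++)
    {
      queue_work();
    }

  pthread_join(tid, NULL);
  sem_destroy(&g_sem);
  return 0;
}

In the real patch the mutex role is played by the g_wqueue_lock spinlock (made IRQ-safe with spin_lock_irqsave()), and the same idea is applied to the per-worker wait semaphore in kwork_cancel.c and kwork_thread.c.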

File tree

8 files changed: +75 -63 lines changed

arch/arm/src/sama5/sam_hsmci.c (-4)

@@ -3213,8 +3213,6 @@ static void sam_callback(void *arg)
       ret = work_cancel(LPWORK, &priv->cbwork);
       if (ret < 0)
         {
-          /* NOTE: Currently, work_cancel only returns success */
-
           lcderr("ERROR: Failed to cancel work: %d\n", ret);
         }
 
@@ -3223,8 +3221,6 @@ static void sam_callback(void *arg)
                          priv->cbarg, 0);
       if (ret < 0)
         {
-          /* NOTE: Currently, work_queue only returns success */
-
           lcderr("ERROR: Failed to schedule work: %d\n", ret);
         }
     }

arch/arm/src/samv7/sam_hsmci.c (-4)

@@ -3353,8 +3353,6 @@ static void sam_callback(void *arg)
       ret = work_cancel(LPWORK, &priv->cbwork);
       if (ret < 0)
         {
-          /* NOTE: Currently, work_cancel only returns success */
-
           mcerr("ERROR: Failed to cancel work: %d\n", ret);
         }
 
@@ -3363,8 +3361,6 @@ static void sam_callback(void *arg)
                          priv->cbarg, 0);
       if (ret < 0)
         {
-          /* NOTE: Currently, work_queue only returns success */
-
           mcerr("ERROR: Failed to schedule work: %d\n", ret);
         }
     }

fs/mount/fs_automount.c (-6)

@@ -658,8 +658,6 @@ static void automount_timeout(wdparm_t arg)
   ret = work_queue(LPWORK, &priv->work, automount_worker, priv, 0);
   if (ret < 0)
     {
-      /* NOTE: Currently, work_queue only returns success */
-
       ferr("ERROR: Failed to schedule work: %d\n", ret);
     }
 }
@@ -771,8 +769,6 @@ static int automount_interrupt(FAR const struct automount_lower_s *lower,
                    priv->lower->ddelay);
   if (ret < 0)
     {
-      /* NOTE: Currently, work_queue only returns success */
-
       ferr("ERROR: Failed to schedule work: %d\n", ret);
     }
   else
@@ -840,8 +836,6 @@ FAR void *automount_initialize(FAR const struct automount_lower_s *lower)
                    priv->lower->ddelay);
   if (ret < 0)
     {
-      /* NOTE: Currently, work_queue only returns success */
-
       ferr("ERROR: Failed to schedule work: %d\n", ret);
     }
 
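
The driver-side hunks above (both HSMCI callbacks and the automounter) only delete stale "only returns success" comments; the work queue calls themselves are unchanged. For context, a minimal sketch of that call pattern follows, assuming a hypothetical driver holding a struct work_s; my_dev_s, my_worker and my_event are placeholder names, while work_queue(), work_cancel(), LPWORK and struct work_s are the existing <nuttx/wqueue.h> interfaces.

#include <nuttx/wqueue.h>

struct my_dev_s              /* Hypothetical driver state */
{
  struct work_s work;        /* Must stay allocated while the work is queued */
};

static void my_worker(FAR void *arg)
{
  FAR struct my_dev_s *priv = (FAR struct my_dev_s *)arg;

  (void)priv;                /* Deferred processing for the device would run here */
}

static void my_event(FAR struct my_dev_s *priv)
{
  int ret;

  /* Cancel any earlier instance, then (re)queue the work with no delay */

  work_cancel(LPWORK, &priv->work);
  ret = work_queue(LPWORK, &priv->work, my_worker, priv, 0);
  if (ret < 0)
    {
      /* As in the drivers above: just log the failure and continue */
    }
}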

sched/wqueue/kwork_cancel.c (+8 -10)

@@ -58,23 +58,20 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
    * new work is typically added to the work queue from interrupt handlers.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_wqueue_lock);
   if (work->worker != NULL)
     {
       /* Remove the entry from the work queue and make sure that it is
        * marked as available (i.e., the worker field is nullified).
        */
 
-      if (WDOG_ISACTIVE(&work->u.timer))
-        {
-          wd_cancel(&work->u.timer);
-        }
-      else
+      work->worker = NULL;
+      wd_cancel(&work->u.timer);
+      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
         {
           dq_rem((FAR dq_entry_t *)work, &wqueue->q);
         }
 
-      work->worker = NULL;
       ret = OK;
     }
   else if (!up_interrupt_context() && !sched_idletask() && sync)
@@ -86,14 +83,15 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
           if (wqueue->worker[wndx].work == work &&
               wqueue->worker[wndx].pid != nxsched_gettid())
             {
+              wqueue->worker[wndx].wait_count--;
+              spin_unlock_irqrestore(&g_wqueue_lock, flags);
               nxsem_wait_uninterruptible(&wqueue->worker[wndx].wait);
-              ret = 1;
-              break;
+              return 1;
             }
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_wqueue_lock, flags);
   return ret;
 }
 

sched/wqueue/kwork_notifier.c (+25 -20)

@@ -87,6 +87,8 @@ static dq_queue_t g_notifier_free;
 
 static dq_queue_t g_notifier_pending;
 
+static spinlock_t g_work_notifier_lock;
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
@@ -166,17 +168,21 @@ static void work_notifier_worker(FAR void *arg)
 
   /* Disable interrupts very briefly. */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_work_notifier_lock);
 
   /* Remove the notification from the pending list */
 
-  dq_rem(&notifier->entry, &g_notifier_pending);
+  notifier = work_notifier_find(notifier->key);
+  if (notifier != NULL)
+    {
+      dq_rem(&notifier->entry, &g_notifier_pending);
 
-  /* Put the notification to the free list */
+      /* Put the notification to the free list */
 
-  dq_addlast(&notifier->entry, &g_notifier_free);
+      dq_addlast(&notifier->entry, &g_notifier_free);
+    }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
 }
 
 /****************************************************************************
@@ -213,14 +219,14 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
 
   /* Disable interrupts very briefly. */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_work_notifier_lock);
 
   /* Try to get the entry from the free list */
 
   notifier = (FAR struct work_notifier_entry_s *)
              dq_remfirst(&g_notifier_free);
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
 
   if (notifier == NULL)
     {
@@ -245,7 +251,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
 
   /* Disable interrupts very briefly. */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_work_notifier_lock);
 
   /* Generate a unique key for this notification */
 
@@ -262,7 +268,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
       dq_addlast(&notifier->entry, &g_notifier_pending);
       ret = notifier->key;
 
-      leave_critical_section(flags);
+      spin_unlock_irqrestore(&g_work_notifier_lock, flags);
     }
 
   return ret;
@@ -293,7 +299,7 @@ void work_notifier_teardown(int key)
 
   /* Disable interrupts very briefly. */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_work_notifier_lock);
 
   /* Find the entry matching this key in the g_notifier_pending list.  We
    * assume that there is only one.
@@ -304,19 +310,18 @@ void work_notifier_teardown(int key)
     {
      /* Cancel the work, this may be waiting */
 
-      if (work_cancel_sync(notifier->info.qid, &notifier->work) != 1)
-        {
-          /* Remove the notification from the pending list */
+      work_cancel(notifier->info.qid, &notifier->work);
 
-          dq_rem(&notifier->entry, &g_notifier_pending);
+      /* Remove the notification from the pending list */
 
-          /* Put the notification to the free list */
+      dq_rem(&notifier->entry, &g_notifier_pending);
 
-          dq_addlast(&notifier->entry, &g_notifier_free);
-        }
+      /* Put the notification to the free list */
+
+      dq_addlast(&notifier->entry, &g_notifier_free);
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
 }
 
 /****************************************************************************
@@ -352,7 +357,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
    * the notifications have been sent.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_work_notifier_lock);
   sched_lock();
 
   /* Process the notification at the head of the pending list until the
@@ -397,7 +402,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
     }
 
   sched_unlock();
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
 }
 
 #endif /* CONFIG_WQUEUE_NOTIFIER */

sched/wqueue/kwork_queue.c (+24 -12)

@@ -47,11 +47,10 @@
 #define queue_work(wqueue, work) \
   do \
     { \
-      int sem_count; \
       dq_addlast((FAR dq_entry_t *)(work), &(wqueue)->q); \
-      nxsem_get_value(&(wqueue)->sem, &sem_count); \
-      if (sem_count < 0) /* There are threads waiting for sem. */ \
+      if ((wqueue)->wait_count < 0) /* There are threads waiting for sem. */ \
        { \
+          (wqueue)->wait_count++; \
           nxsem_post(&(wqueue)->sem); \
         } \
     } \
@@ -68,24 +67,28 @@
 static void work_timer_expiry(wdparm_t arg)
 {
   FAR struct work_s *work = (FAR struct work_s *)arg;
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_wqueue_lock);
 
-  queue_work(work->wq, work);
-  leave_critical_section(flags);
+  /* We have being canceled */
+
+  if (work->worker != NULL)
+    {
+      queue_work(work->wq, work);
+    }
+
+  spin_unlock_irqrestore(&g_wqueue_lock, flags);
 }
 
 static bool work_is_canceling(FAR struct kworker_s *kworkers, int nthreads,
                               FAR struct work_s *work)
 {
-  int semcount;
   int wndx;
 
   for (wndx = 0; wndx < nthreads; wndx++)
     {
       if (kworkers[wndx].work == work)
         {
-          nxsem_get_value(&kworkers[wndx].wait, &semcount);
-          if (semcount < 0)
+          if (kworkers[wndx].wait_count < 0)
             {
               return true;
             }
@@ -145,13 +148,22 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
    * task logic or from interrupt handling logic.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_wqueue_lock);
 
   /* Remove the entry from the timer and work queue. */
 
   if (work->worker != NULL)
     {
-      work_cancel_wq(wqueue, work);
+      /* Remove the entry from the work queue and make sure that it is
+       * marked as available (i.e., the worker field is nullified).
+       */
+
+      work->worker = NULL;
+      wd_cancel(&work->u.timer);
+      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
+        {
+          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
+        }
     }
 
   if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))
@@ -177,7 +189,7 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
     }
 
 out:
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_wqueue_lock, flags);
   return ret;
 }
 

sched/wqueue/kwork_thread.c (+13 -7)

@@ -104,6 +104,8 @@ struct lp_wqueue_s g_lpwork =
 
 #endif /* CONFIG_SCHED_LPWORK */
 
+spinlock_t g_wqueue_lock = SP_UNLOCKED;
+
 /****************************************************************************
  * Private Functions
 ****************************************************************************/
@@ -138,7 +140,6 @@ static int work_thread(int argc, FAR char *argv[])
   worker_t worker;
   irqstate_t flags;
   FAR void *arg;
-  int semcount;
 
   /* Get the handle from argv */
 
@@ -147,7 +148,7 @@ static int work_thread(int argc, FAR char *argv[])
   kworker = (FAR struct kworker_s *)
             ((uintptr_t)strtoul(argv[2], NULL, 16));
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_wqueue_lock);
 
   /* Loop forever */
 
@@ -189,19 +190,19 @@ static int work_thread(int argc, FAR char *argv[])
            * performed... we don't have any idea how long this will take!
            */
 
-          leave_critical_section(flags);
+          spin_unlock_irqrestore(&g_wqueue_lock, flags);
           CALL_WORKER(worker, arg);
-          flags = enter_critical_section();
+          flags = spin_lock_irqsave(&g_wqueue_lock);
 
           /* Mark the thread un-busy */
 
           kworker->work = NULL;
 
           /* Check if someone is waiting, if so, wakeup it */
 
-          nxsem_get_value(&kworker->wait, &semcount);
-          while (semcount++ < 0)
+          while (kworker->wait_count < 0)
            {
+              kworker->wait_count++;
               nxsem_post(&kworker->wait);
             }
         }
@@ -211,10 +212,13 @@ static int work_thread(int argc, FAR char *argv[])
        * posted.
        */
 
+      wqueue->wait_count--;
+      spin_unlock_irqrestore(&g_wqueue_lock, flags);
       nxsem_wait_uninterruptible(&wqueue->sem);
+      flags = spin_lock_irqsave(&g_wqueue_lock);
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_wqueue_lock, flags);
 
   nxsem_post(&wqueue->exsem);
   return OK;
@@ -276,6 +280,7 @@ static int work_thread_create(FAR const char *name, int priority,
         }
 
       wqueue->worker[wndx].pid = pid;
+      wqueue->worker[wndx].wait_count = 0;
     }
 
   sched_unlock();
@@ -334,6 +339,7 @@ FAR struct kwork_wqueue_s *work_queue_create(FAR const char *name,
   nxsem_init(&wqueue->sem, 0, 0);
   nxsem_init(&wqueue->exsem, 0, 0);
   wqueue->nthreads = nthreads;
+  wqueue->wait_count = 0;
 
   /* Create the work queue thread pool */
 