Skip to content

Commit 4f00b69

Browse files
committed
wqueue: remove critical sections from wqueue
reason: Decouple the semaphore count from the business logic by using an independent counting variable (wait_count), which allows critical sections to be replaced with spinlocks in many cases. Signed-off-by: hujun5 <[email protected]>
1 parent 5f4a15b commit 4f00b69

File tree

8 files changed

+100
-67
lines changed

8 files changed

+100
-67
lines changed

arch/arm/src/sama5/sam_hsmci.c

-4
Original file line numberDiff line numberDiff line change
@@ -3215,8 +3215,6 @@ static void sam_callback(void *arg)
32153215
ret = work_cancel(LPWORK, &priv->cbwork);
32163216
if (ret < 0)
32173217
{
3218-
/* NOTE: Currently, work_cancel only returns success */
3219-
32203218
lcderr("ERROR: Failed to cancel work: %d\n", ret);
32213219
}
32223220

@@ -3225,8 +3223,6 @@ static void sam_callback(void *arg)
32253223
priv->cbarg, 0);
32263224
if (ret < 0)
32273225
{
3228-
/* NOTE: Currently, work_queue only returns success */
3229-
32303226
lcderr("ERROR: Failed to schedule work: %d\n", ret);
32313227
}
32323228
}

arch/arm/src/samv7/sam_hsmci.c

-4
Original file line numberDiff line numberDiff line change
@@ -3355,8 +3355,6 @@ static void sam_callback(void *arg)
33553355
ret = work_cancel(LPWORK, &priv->cbwork);
33563356
if (ret < 0)
33573357
{
3358-
/* NOTE: Currently, work_cancel only returns success */
3359-
33603358
mcerr("ERROR: Failed to cancel work: %d\n", ret);
33613359
}
33623360

@@ -3365,8 +3363,6 @@ static void sam_callback(void *arg)
33653363
priv->cbarg, 0);
33663364
if (ret < 0)
33673365
{
3368-
/* NOTE: Currently, work_queue only returns success */
3369-
33703366
mcerr("ERROR: Failed to schedule work: %d\n", ret);
33713367
}
33723368
}

fs/mount/fs_automount.c

-6
Original file line numberDiff line numberDiff line change
@@ -659,8 +659,6 @@ static void automount_timeout(wdparm_t arg)
659659
ret = work_queue(LPWORK, &priv->work, automount_worker, priv, 0);
660660
if (ret < 0)
661661
{
662-
/* NOTE: Currently, work_queue only returns success */
663-
664662
ferr("ERROR: Failed to schedule work: %d\n", ret);
665663
}
666664
}
@@ -772,8 +770,6 @@ static int automount_interrupt(FAR const struct automount_lower_s *lower,
772770
priv->lower->ddelay);
773771
if (ret < 0)
774772
{
775-
/* NOTE: Currently, work_queue only returns success */
776-
777773
ferr("ERROR: Failed to schedule work: %d\n", ret);
778774
}
779775
else
@@ -848,8 +844,6 @@ FAR void *automount_initialize(FAR const struct automount_lower_s *lower)
848844
priv->lower->ddelay);
849845
if (ret < 0)
850846
{
851-
/* NOTE: Currently, work_queue only returns success */
852-
853847
ferr("ERROR: Failed to schedule work: %d\n", ret);
854848
}
855849

sched/wqueue/kwork_cancel.c

+8-10
Original file line numberDiff line numberDiff line change
@@ -58,23 +58,20 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
5858
* new work is typically added to the work queue from interrupt handlers.
5959
*/
6060

61-
flags = enter_critical_section();
61+
flags = spin_lock_irqsave(&wqueue->lock);
6262
if (work->worker != NULL)
6363
{
6464
/* Remove the entry from the work queue and make sure that it is
6565
* marked as available (i.e., the worker field is nullified).
6666
*/
6767

68-
if (WDOG_ISACTIVE(&work->u.timer))
69-
{
70-
wd_cancel(&work->u.timer);
71-
}
72-
else
68+
work->worker = NULL;
69+
wd_cancel(&work->u.timer);
70+
if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
7371
{
7472
dq_rem((FAR dq_entry_t *)work, &wqueue->q);
7573
}
7674

77-
work->worker = NULL;
7875
ret = OK;
7976
}
8077
else if (!up_interrupt_context() && !sched_idletask() && sync)
@@ -86,14 +83,15 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
8683
if (wqueue->worker[wndx].work == work &&
8784
wqueue->worker[wndx].pid != nxsched_gettid())
8885
{
86+
wqueue->worker[wndx].wait_count++;
87+
spin_unlock_irqrestore(&wqueue->lock, flags);
8988
nxsem_wait_uninterruptible(&wqueue->worker[wndx].wait);
90-
ret = 1;
91-
break;
89+
return 1;
9290
}
9391
}
9492
}
9593

96-
leave_critical_section(flags);
94+
spin_unlock_irqrestore(&wqueue->lock, flags);
9795
return ret;
9896
}
9997

sched/wqueue/kwork_notifier.c

+36-24
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,8 @@ struct work_notifier_entry_s
7373
* Private Data
7474
****************************************************************************/
7575

76+
static spinlock_t g_work_notifier_lock;
77+
7678
/* This is a doubly linked list of free notifications. */
7779

7880
static dq_queue_t g_notifier_free;
@@ -160,23 +162,34 @@ static void work_notifier_worker(FAR void *arg)
160162
(FAR struct work_notifier_entry_s *)arg;
161163
irqstate_t flags;
162164

163-
/* Forward to the real worker */
164-
165-
notifier->info.worker(notifier->info.arg);
166-
167165
/* Disable interrupts very briefly. */
168166

169-
flags = enter_critical_section();
167+
flags = spin_lock_irqsave(&g_work_notifier_lock);
170168

171169
/* Remove the notification from the pending list */
172170

173-
dq_rem(&notifier->entry, &g_notifier_pending);
171+
notifier = work_notifier_find(notifier->key);
172+
if (notifier != NULL)
173+
{
174+
/* Forward to the real worker */
175+
176+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
177+
178+
notifier->info.worker(notifier->info.arg);
179+
180+
flags = spin_lock_irqsave(&g_work_notifier_lock);
181+
notifier = work_notifier_find(notifier->key);
182+
if (notifier != NULL)
183+
{
184+
dq_rem(&notifier->entry, &g_notifier_pending);
174185

175-
/* Put the notification to the free list */
186+
/* Put the notification to the free list */
176187

177-
dq_addlast(&notifier->entry, &g_notifier_free);
188+
dq_addlast(&notifier->entry, &g_notifier_free);
189+
}
190+
}
178191

179-
leave_critical_section(flags);
192+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
180193
}
181194

182195
/****************************************************************************
@@ -213,14 +226,14 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
213226

214227
/* Disable interrupts very briefly. */
215228

216-
flags = enter_critical_section();
229+
flags = spin_lock_irqsave(&g_work_notifier_lock);
217230

218231
/* Try to get the entry from the free list */
219232

220233
notifier = (FAR struct work_notifier_entry_s *)
221234
dq_remfirst(&g_notifier_free);
222235

223-
leave_critical_section(flags);
236+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
224237

225238
if (notifier == NULL)
226239
{
@@ -245,7 +258,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
245258

246259
/* Disable interrupts very briefly. */
247260

248-
flags = enter_critical_section();
261+
flags = spin_lock_irqsave(&g_work_notifier_lock);
249262

250263
/* Generate a unique key for this notification */
251264

@@ -262,7 +275,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
262275
dq_addlast(&notifier->entry, &g_notifier_pending);
263276
ret = notifier->key;
264277

265-
leave_critical_section(flags);
278+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
266279
}
267280

268281
return ret;
@@ -293,7 +306,7 @@ void work_notifier_teardown(int key)
293306

294307
/* Disable interrupts very briefly. */
295308

296-
flags = enter_critical_section();
309+
flags = spin_lock_irqsave(&g_work_notifier_lock);
297310

298311
/* Find the entry matching this key in the g_notifier_pending list. We
299312
* assume that there is only one.
@@ -304,19 +317,18 @@ void work_notifier_teardown(int key)
304317
{
305318
/* Cancel the work, this may be waiting */
306319

307-
if (work_cancel_sync(notifier->info.qid, &notifier->work) != 1)
308-
{
309-
/* Remove the notification from the pending list */
320+
work_cancel(notifier->info.qid, &notifier->work);
310321

311-
dq_rem(&notifier->entry, &g_notifier_pending);
322+
/* Remove the notification from the pending list */
312323

313-
/* Put the notification to the free list */
324+
dq_rem(&notifier->entry, &g_notifier_pending);
314325

315-
dq_addlast(&notifier->entry, &g_notifier_free);
316-
}
326+
/* Put the notification to the free list */
327+
328+
dq_addlast(&notifier->entry, &g_notifier_free);
317329
}
318330

319-
leave_critical_section(flags);
331+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
320332
}
321333

322334
/****************************************************************************
@@ -352,7 +364,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
352364
* the notifications have been sent.
353365
*/
354366

355-
flags = enter_critical_section();
367+
flags = spin_lock_irqsave(&g_work_notifier_lock);
356368
sched_lock();
357369

358370
/* Process the notification at the head of the pending list until the
@@ -397,7 +409,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
397409
}
398410

399411
sched_unlock();
400-
leave_critical_section(flags);
412+
spin_unlock_irqrestore(&g_work_notifier_lock, flags);
401413
}
402414

403415
#endif /* CONFIG_WQUEUE_NOTIFIER */

sched/wqueue/kwork_queue.c

+28-12
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,10 @@
4747
#define queue_work(wqueue, work) \
4848
do \
4949
{ \
50-
int sem_count; \
5150
dq_addlast((FAR dq_entry_t *)(work), &(wqueue)->q); \
52-
nxsem_get_value(&(wqueue)->sem, &sem_count); \
53-
if (sem_count < 0) /* There are threads waiting for sem. */ \
51+
if ((wqueue)->wait_count > 0) /* There are threads waiting for sem. */ \
5452
{ \
53+
(wqueue)->wait_count--; \
5554
nxsem_post(&(wqueue)->sem); \
5655
} \
5756
} \
@@ -68,24 +67,30 @@
6867
static void work_timer_expiry(wdparm_t arg)
6968
{
7069
FAR struct work_s *work = (FAR struct work_s *)arg;
71-
irqstate_t flags = enter_critical_section();
70+
irqstate_t flags = spin_lock_irqsave(&work->wq->lock);
71+
sched_lock();
7272

73-
queue_work(work->wq, work);
74-
leave_critical_section(flags);
73+
/* We have been canceled */
74+
75+
if (work->worker != NULL)
76+
{
77+
queue_work(work->wq, work);
78+
}
79+
80+
spin_unlock_irqrestore(&work->wq->lock, flags);
81+
sched_unlock();
7582
}
7683

7784
static bool work_is_canceling(FAR struct kworker_s *kworkers, int nthreads,
7885
FAR struct work_s *work)
7986
{
80-
int semcount;
8187
int wndx;
8288

8389
for (wndx = 0; wndx < nthreads; wndx++)
8490
{
8591
if (kworkers[wndx].work == work)
8692
{
87-
nxsem_get_value(&kworkers[wndx].wait, &semcount);
88-
if (semcount < 0)
93+
if (kworkers[wndx].wait_count > 0)
8994
{
9095
return true;
9196
}
@@ -145,13 +150,23 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
145150
* task logic or from interrupt handling logic.
146151
*/
147152

148-
flags = enter_critical_section();
153+
flags = spin_lock_irqsave(&wqueue->lock);
154+
sched_lock();
149155

150156
/* Remove the entry from the timer and work queue. */
151157

152158
if (work->worker != NULL)
153159
{
154-
work_cancel_wq(wqueue, work);
160+
/* Remove the entry from the work queue and make sure that it is
161+
* marked as available (i.e., the worker field is nullified).
162+
*/
163+
164+
work->worker = NULL;
165+
wd_cancel(&work->u.timer);
166+
if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
167+
{
168+
dq_rem((FAR dq_entry_t *)work, &wqueue->q);
169+
}
155170
}
156171

157172
if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))
@@ -177,7 +192,8 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
177192
}
178193

179194
out:
180-
leave_critical_section(flags);
195+
spin_unlock_irqrestore(&wqueue->lock, flags);
196+
sched_unlock();
181197
return ret;
182198
}
183199

0 commit comments

Comments
 (0)