threadx/common/src/tx_event_flags_set.c (STM32 Libraries and Samples)
 
/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Event Flags                                                         */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#include "tx_trace.h"
#include "tx_thread.h"
#include "tx_event_flags.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _tx_event_flags_set                                  PORTABLE C     */
/*                                                           6.1          */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function sets the specified flags in the event group based on */
/*    the set option specified.  All threads suspended on the group whose */
/*    get request can now be satisfied are resumed.                       */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    group_ptr                         Pointer to group control block    */
/*    flags_to_set                      Event flags to set                */
/*    set_option                        Specified either AND or OR        */
/*                                        operation on the event flags    */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    TX_SUCCESS                        Always returns success            */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_thread_system_preempt_check   Check for preemption              */
/*    _tx_thread_system_resume          Resume thread service             */
/*    _tx_thread_system_ni_resume       Non-interruptable resume thread   */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    Application Code                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     William E. Lamie         Initial Version 6.0           */
/*  09-30-2020     Yuxin Zhou               Modified comment(s),          */
/*                                            resulting in version 6.1    */
/*                                                                        */
/**************************************************************************/
UINT  _tx_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set, UINT set_option)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD       *thread_ptr;
TX_THREAD       *next_thread_ptr;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
TX_THREAD       *satisfied_list;
TX_THREAD       *last_satisfied;
TX_THREAD       *suspended_list;
UINT            suspended_count;
ULONG           current_event_flags;
ULONG           requested_flags;
ULONG           flags_satisfied;
ULONG           *suspend_info_ptr;
UINT            and_request;
UINT            get_option;
UINT            clear_request;
UINT            preempt_check;
#ifndef TX_NOT_INTERRUPTABLE
UINT            interrupted_set_request;
#endif
#ifndef TX_DISABLE_NOTIFY_CALLBACKS
VOID            (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_group_ptr);
#endif


    /* Disable interrupts to remove the semaphore from the created list.  */
    TX_DISABLE

#ifdef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO

    /* Increment the total event flags set counter.  */
    _tx_event_flags_performance_set_count++;

    /* Increment the number of event flags sets on this semaphore.  */
    group_ptr -> tx_event_flags_group_performance_set_count++;
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_EVENT_FLAGS_SET, group_ptr, flags_to_set, set_option, group_ptr -> tx_event_flags_group_suspended_count, TX_TRACE_EVENT_FLAGS_EVENTS)

    /* Log this kernel call.  */
    TX_EL_EVENT_FLAGS_SET_INSERT

    /* Determine how to set this group's event flags.  */
    if ((set_option & TX_EVENT_FLAGS_AND_MASK) == TX_AND)
    {

#ifndef TX_NOT_INTERRUPTABLE

        /* Set interrupted set request flag to false.  */
        interrupted_set_request =  TX_FALSE;

        /* Determine if the suspension list is being processed by an interrupted
           set request.  */
        if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
        {

            if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
            {

                /* Set the interrupted set request flag.  */
                interrupted_set_request =  TX_TRUE;
            }
        }

        /* Was a set request interrupted?  */
        if (interrupted_set_request == TX_TRUE)
        {

            /* A previous set operation was interrupted, we need to defer the
               event clearing until the set operation is complete.  */

            /* Remember the events to clear.  */
            group_ptr -> tx_event_flags_group_delayed_clear =
                group_ptr -> tx_event_flags_group_delayed_clear | ~flags_to_set;
        }
        else
        {
#endif

            /* Previous set operation was not interrupted, simply clear the
               specified flags by "ANDing" the flags into the current events
               of the group.  */
            group_ptr -> tx_event_flags_group_current =
                group_ptr -> tx_event_flags_group_current & flags_to_set;

#ifndef TX_NOT_INTERRUPTABLE
        }
#endif

        /* Restore interrupts.  */
        TX_RESTORE
    }
    else
    {

#ifndef TX_DISABLE_NOTIFY_CALLBACKS

        /* Pickup the notify callback routine for this event flag group.  */
        events_set_notify =  group_ptr -> tx_event_flags_group_set_notify;
#endif

        /* "OR" the flags into the current events of the group.  */
        group_ptr -> tx_event_flags_group_current =
            group_ptr -> tx_event_flags_group_current | flags_to_set;

#ifndef TX_NOT_INTERRUPTABLE

        /* Determine if there are any delayed flags to clear.  */
        if (group_ptr -> tx_event_flags_group_delayed_clear != ((ULONG) 0))
        {

            /* Yes, we need to neutralize the delayed clearing as well.  */
            group_ptr -> tx_event_flags_group_delayed_clear =
                group_ptr -> tx_event_flags_group_delayed_clear & ~flags_to_set;
        }
#endif

        /* Clear the preempt check flag.  */
        preempt_check =  TX_FALSE;

        /* Pickup the thread suspended count.  */
        suspended_count =  group_ptr -> tx_event_flags_group_suspended_count;

        /* Determine if there are any threads suspended on the event flag group.  */
        if (group_ptr -> tx_event_flags_group_suspension_list != TX_NULL)
        {

            /* Determine if there is just a single thread waiting on the event
               flag group.  */
            if (suspended_count == ((UINT) 1))
            {

                /* Single thread waiting for event flags.  Bypass the multiple
                   thread logic.  */

                /* Setup thread pointer.  */
                thread_ptr =  group_ptr -> tx_event_flags_group_suspension_list;

                /* Pickup the current event flags.  */
                current_event_flags =  group_ptr -> tx_event_flags_group_current;

                /* Pickup the suspend information.  */
                requested_flags =  thread_ptr -> tx_thread_suspend_info;

                /* Pickup the suspend option.  */
                get_option =  thread_ptr -> tx_thread_suspend_option;

                /* Isolate the AND selection.  */
                and_request =  (get_option & TX_AND);

                /* Check for AND condition. All flags must be present to satisfy request.  */
                if (and_request == TX_AND)
                {

                    /* AND request is present.  */

                    /* Calculate the flags present.  */
                    flags_satisfied =  (current_event_flags & requested_flags);

                    /* Determine if they satisfy the AND request.  */
                    if (flags_satisfied != requested_flags)
                    {

                        /* No, not all the requested flags are present.  Clear the
                           flags present variable.  */
                        flags_satisfied =  ((ULONG) 0);
                    }
                }
                else
                {

                    /* OR request is present.  Simply or the requested flags and the current flags.  */
                    flags_satisfied =  (current_event_flags & requested_flags);
                }

                /* Determine if the request is satisfied.  */
                if (flags_satisfied != ((ULONG) 0))
                {

                    /* Yes, resume the thread and apply any event flag clearing.  */

                    /* Set the preempt check flag.  */
                    preempt_check =  TX_TRUE;

                    /* Return the actual event flags that satisfied the request.  */
                    suspend_info_ptr =  TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
                    *suspend_info_ptr =  current_event_flags;

                    /* Pickup the clear bit.  */
                    clear_request =  (get_option & TX_EVENT_FLAGS_CLEAR_MASK);

                    /* Determine whether or not clearing needs to take place.  */
                    if (clear_request == TX_TRUE)
                    {

                        /* Yes, clear the flags that satisfied this request.  */
                        group_ptr -> tx_event_flags_group_current =
                            group_ptr -> tx_event_flags_group_current & (~requested_flags);
                    }

                    /* Clear the suspension information in the event flag group.  */
                    group_ptr -> tx_event_flags_group_suspension_list =  TX_NULL;
                    group_ptr -> tx_event_flags_group_suspended_count =  TX_NO_SUSPENSIONS;

                    /* Clear cleanup routine to avoid timeout.  */
                    thread_ptr -> tx_thread_suspend_cleanup =  TX_NULL;

                    /* Put return status into the thread control block.  */
                    thread_ptr -> tx_thread_suspend_status =  TX_SUCCESS;

#ifdef TX_NOT_INTERRUPTABLE

                    /* Resume the thread!  */
                    _tx_thread_system_ni_resume(thread_ptr);
#else

                    /* Temporarily disable preemption.  */
                    _tx_thread_preempt_disable++;

                    /* Restore interrupts.  */
                    TX_RESTORE

                    /* Resume thread.  */
                    _tx_thread_system_resume(thread_ptr);

                    /* Disable interrupts to remove the semaphore from the created list.  */
                    TX_DISABLE
#endif
                }
            }
            else
            {

                /* Otherwise, the event flag requests of multiple threads must be
                   examined.  */

                /* Setup thread pointer, keep a local copy of the head pointer.  */
                suspended_list =  group_ptr -> tx_event_flags_group_suspension_list;
                thread_ptr =      suspended_list;

                /* Clear the suspended list head pointer to thwart manipulation of
                   the list in ISR's while we are processing here.  */
                group_ptr -> tx_event_flags_group_suspension_list =  TX_NULL;

                /* Setup the satisfied thread pointers.  */
                satisfied_list =  TX_NULL;
                last_satisfied =  TX_NULL;

                /* Pickup the current event flags.  */
                current_event_flags =  group_ptr -> tx_event_flags_group_current;

                /* Disable preemption while we process the suspended list.  */
                _tx_thread_preempt_disable++;

                /* Loop to examine all of the suspended threads.  */
                do
                {

#ifndef TX_NOT_INTERRUPTABLE

                    /* Restore interrupts temporarily.  */
                    TX_RESTORE

                    /* Disable interrupts again.  */
                    TX_DISABLE
#endif

                    /* Determine if we need to reset the search.  */
                    if (group_ptr -> tx_event_flags_group_reset_search != TX_FALSE)
                    {

                        /* Clear the reset search flag.  */
                        group_ptr -> tx_event_flags_group_reset_search =  TX_FALSE;

                        /* Move the thread pointer to the beginning of the search list.  */
                        thread_ptr =  suspended_list;

                        /* Reset the suspended count.  */
                        suspended_count =  group_ptr -> tx_event_flags_group_suspended_count;

                        /* Update the current events with any new ones that might
                           have been set in a nested set events call from an ISR.  */
                        current_event_flags =  current_event_flags | group_ptr -> tx_event_flags_group_current;
                    }

                    /* Save next thread pointer.  */
                    next_thread_ptr =  thread_ptr -> tx_thread_suspended_next;

                    /* Pickup the suspend information.  */
                    requested_flags =  thread_ptr -> tx_thread_suspend_info;

                    /* Pickup this thread's suspension get option.  */
                    get_option =  thread_ptr -> tx_thread_suspend_option;

                    /* Isolate the AND selection.  */
                    and_request =  (get_option & TX_AND);

                    /* Check for AND condition. All flags must be present to satisfy request.  */
                    if (and_request == TX_AND)
                    {

                        /* AND request is present.  */

                        /* Calculate the flags present.  */
                        flags_satisfied =  (current_event_flags & requested_flags);

                        /* Determine if they satisfy the AND request.  */
                        if (flags_satisfied != requested_flags)
                        {

                            /* No, not all the requested flags are present.  Clear the
                               flags present variable.  */
                            flags_satisfied =  ((ULONG) 0);
                        }
                    }
                    else
                    {

                        /* OR request is present.  Simply or the requested flags and the current flags.  */
                        flags_satisfied =  (current_event_flags & requested_flags);
                    }

                    /* Check to see if the thread had a timeout or wait abort during the event search
                       processing.  If so, just set the flags satisfied to ensure the processing here
                       removes the thread from the suspension list.  */
                    if (thread_ptr -> tx_thread_state != TX_EVENT_FLAG)
                    {

                        /* Simply set the satisfied flags to 1 in order to remove the thread
                           from the suspension list.  */
                        flags_satisfied =  ((ULONG) 1);
                    }

                    /* Determine if the request is satisfied.  */
                    if (flags_satisfied != ((ULONG) 0))
                    {

                        /* Yes, this request can be handled now.  */

                        /* Set the preempt check flag.  */
                        preempt_check =  TX_TRUE;

                        /* Determine if the thread is still suspended on the event flag group.
                           If not, a wait abort must have been done from an ISR.  */
                        if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
                        {

                            /* Return the actual event flags that satisfied the request.  */
                            suspend_info_ptr =  TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
                            *suspend_info_ptr =  current_event_flags;

                            /* Pickup the clear bit.  */
                            clear_request =  (get_option & TX_EVENT_FLAGS_CLEAR_MASK);

                            /* Determine whether or not clearing needs to take place.  */
                            if (clear_request == TX_TRUE)
                            {

                                /* Yes, clear the flags that satisfied this request.  */
                                group_ptr -> tx_event_flags_group_current =
                                    group_ptr -> tx_event_flags_group_current & ~requested_flags;
                            }

                            /* Prepare for resumption of the first thread.  */

                            /* Clear cleanup routine to avoid timeout.  */
                            thread_ptr -> tx_thread_suspend_cleanup =  TX_NULL;

                            /* Put return status into the thread control block.  */
                            thread_ptr -> tx_thread_suspend_status =  TX_SUCCESS;
                        }

                        /* We need to remove the thread from the suspension list and place it
                           in the expired list.  */

                        /* See if this is the only suspended thread on the list.  */
                        if (thread_ptr == thread_ptr -> tx_thread_suspended_next)
                        {

                            /* Yes, the only suspended thread.  */

                            /* Update the head pointer.  */
                            suspended_list =  TX_NULL;
                        }
                        else
                        {

                            /* At least one more thread is on the same expiration list.  */

                            /* Update the links of the adjacent threads.  */
                            next_thread =      thread_ptr -> tx_thread_suspended_next;
                            previous_thread =  thread_ptr -> tx_thread_suspended_previous;
                            next_thread -> tx_thread_suspended_previous =  previous_thread;
                            previous_thread -> tx_thread_suspended_next =  next_thread;

                            /* Update the list head pointer, if removing the head of the
                               list.  */
                            if (suspended_list == thread_ptr)
                            {

                                /* Yes, head pointer needs to be updated.  */
                                suspended_list =  thread_ptr -> tx_thread_suspended_next;
                            }
                        }

                        /* Decrement the suspension count.  */
                        group_ptr -> tx_event_flags_group_suspended_count--;

                        /* Place this thread on the expired list.  */
                        if (satisfied_list == TX_NULL)
                        {

                            /* First thread on the satisfied list.  */
                            satisfied_list =  thread_ptr;
                            last_satisfied =  thread_ptr;

                            /* Setup initial next pointer.  */
                            thread_ptr -> tx_thread_suspended_next =  TX_NULL;
                        }
                        else
                        {

                            /* Not the first thread on the satisfied list.  */

                            /* Link it up at the end.  */
                            last_satisfied -> tx_thread_suspended_next =  thread_ptr;
                            thread_ptr -> tx_thread_suspended_next =      TX_NULL;
                            last_satisfied =                              thread_ptr;
                        }
                    }

                    /* Copy next thread pointer to working thread ptr.  */
                    thread_ptr =  next_thread_ptr;

                    /* Decrement the suspension count.  */
                    suspended_count--;

                } while (suspended_count != TX_NO_SUSPENSIONS);

                /* Setup the group's suspension list head again.  */
                group_ptr -> tx_event_flags_group_suspension_list =  suspended_list;

#ifndef TX_NOT_INTERRUPTABLE

                /* Determine if there is any delayed event clearing to perform.  */
                if (group_ptr -> tx_event_flags_group_delayed_clear != ((ULONG) 0))
                {

                    /* Perform the delayed event clearing.  */
                    group_ptr -> tx_event_flags_group_current =
                        group_ptr -> tx_event_flags_group_current & ~(group_ptr -> tx_event_flags_group_delayed_clear);

                    /* Clear the delayed event flag clear value.  */
                    group_ptr -> tx_event_flags_group_delayed_clear =  ((ULONG) 0);
                }
#endif

                /* Restore interrupts.  */
                TX_RESTORE

                /* Walk through the satisfied list, setup initial thread pointer.  */
                thread_ptr =  satisfied_list;
                while (thread_ptr != TX_NULL)
                {

                    /* Get next pointer first.  */
                    next_thread_ptr =  thread_ptr -> tx_thread_suspended_next;

                    /* Disable interrupts.  */
                    TX_DISABLE

#ifdef TX_NOT_INTERRUPTABLE

                    /* Resume the thread!  */
                    _tx_thread_system_ni_resume(thread_ptr);

                    /* Restore interrupts.  */
                    TX_RESTORE
#else

                    /* Disable preemption again.  */
                    _tx_thread_preempt_disable++;

                    /* Restore interrupt posture.  */
                    TX_RESTORE

                    /* Resume the thread.  */
                    _tx_thread_system_resume(thread_ptr);
#endif

                    /* Move next thread to current.  */
                    thread_ptr =  next_thread_ptr;
                }

                /* Disable interrupts.  */
                TX_DISABLE

                /* Release thread preemption disable.  */
                _tx_thread_preempt_disable--;
            }
        }
        else
        {

            /* Determine if we need to set the reset search field.  */
            if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
            {

                /* We interrupted a search of an event flag group suspension
                   list.  Make sure we reset the search.  */
                group_ptr -> tx_event_flags_group_reset_search =  TX_TRUE;
            }
        }

        /* Restore interrupts.  */
        TX_RESTORE

#ifndef TX_DISABLE_NOTIFY_CALLBACKS

        /* Determine if a notify callback is required.  */
        if (events_set_notify != TX_NULL)
        {

            /* Call application event flags set notification.  */
            (events_set_notify)(group_ptr);
        }
#endif

        /* Determine if a check for preemption is necessary.  */
        if (preempt_check == TX_TRUE)
        {

            /* Yes, one or more threads were resumed, check for preemption.  */
            _tx_thread_system_preempt_check();
        }
    }

    /* Return completion status.  */
    return(TX_SUCCESS);
}
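For context, the sketch below shows how application code typically reaches this internal routine through the public ThreadX API (tx_event_flags_create, tx_event_flags_set with TX_OR, tx_event_flags_get with TX_AND_CLEAR). It is a minimal illustration, not part of this file; the group object, flag bit values, entry functions, and wait period are assumed names chosen for the example.

/* Illustrative usage sketch (hypothetical names): a producer signals flags
   with TX_OR and a consumer waits for both flags with TX_AND_CLEAR.  */

#include "tx_api.h"

#define EVENT_RX_READY   ((ULONG) 0x1)     /* Assumed flag bit for the example.  */
#define EVENT_TX_DONE    ((ULONG) 0x2)     /* Assumed flag bit for the example.  */

static TX_EVENT_FLAGS_GROUP demo_events;   /* Assumed group object.  */

void demo_setup(void)
{

    /* Create the event flags group; all 32 flags start cleared.  */
    tx_event_flags_create(&demo_events, "demo events");
}

void demo_producer(void)
{

    /* OR the ready flag into the group; any waiter whose request is now
       satisfied is resumed by the set service.  */
    tx_event_flags_set(&demo_events, EVENT_RX_READY, TX_OR);
}

void demo_consumer(void)
{

ULONG actual_flags;

    /* Block up to 100 ticks until both flags are present, then clear them.  */
    if (tx_event_flags_get(&demo_events, EVENT_RX_READY | EVENT_TX_DONE,
                           TX_AND_CLEAR, &actual_flags, 100) == TX_SUCCESS)
    {

        /* actual_flags holds the flag values that satisfied the request.  */
    }
}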