/* ... */
#include <sys/lock.h>
#include <stdlib.h>
#include <assert.h>            /* assert() is used by the lock implementations below */
#include <sys/reent.h>
#include "sdkconfig.h"         /* CONFIG_IDF_TARGET_* checks in esp_newlib_locks_init() */
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "freertos/portable.h"
#include "esp_rom_caps.h"
/* ... */
static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;
/* ... */
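/* lock_init_generic() lazily creates the FreeRTOS mutex that backs a _lock_t.
 * The spinlock above serializes concurrent initialization attempts, and
 * IRAM_ATTR keeps these routines executable while the flash cache is disabled. */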
static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
{
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        /* ... */
    } else {
        /* ... */
        SemaphoreHandle_t new_sem = xQueueCreateMutex(mutex_type);
        if (!new_sem) {
            abort();
        }
        *lock = (_lock_t)new_sem;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}
void IRAM_ATTR _lock_init(_lock_t *lock)
{
    *lock = 0;
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_init_recursive(_lock_t *lock)
{
    *lock = 0;
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
/* ... */
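/* _lock_close(): deletes the semaphore behind the lock, if one was created,
 * and resets *lock to 0 so a later init or acquire can recreate it. */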
void IRAM_ATTR _lock_close(_lock_t *lock)
{
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
#if (INCLUDE_xSemaphoreGetMutexHolder == 1)
        configASSERT(xSemaphoreGetMutexHolder(h) == NULL);
#endif
        vSemaphoreDelete(h);
        *lock = 0;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}
void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
/* ... */
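/* Common acquire path for the _lock_acquire* and _lock_try_acquire* variants.
 * Returns 0 on success, -1 on failure. Locking is a no-op before the FreeRTOS
 * scheduler starts. When the caller cannot yield (xPortCanYield() == false,
 * e.g. when called from an ISR), the ISR-safe take is used; recursive mutexes
 * and blocking waits are not allowed in that context and abort(). */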
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type)
{
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    if (!h) {
        if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
            return 0;
        }
        lock_init_generic(lock, mutex_type);
        h = (SemaphoreHandle_t)(*lock);
        configASSERT(h != NULL);
    }
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return 0;
    }
    BaseType_t success;
    if (!xPortCanYield()) {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort();
        }
        BaseType_t higher_task_woken = false;
        success = xSemaphoreTakeFromISR(h, &higher_task_woken);
        if (!success && delay > 0) {
            abort();
        }
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            success = xSemaphoreTakeRecursive(h, delay);
        } else {
            success = xSemaphoreTake(h, delay);
        }
    }
    return (success == pdTRUE) ? 0 : -1;
}
void IRAM_ATTR _lock_acquire(_lock_t *lock)
{
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
{
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
{
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
{
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
/* ... */
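/* Common release path, mirroring lock_acquire_generic(): a no-op before the
 * scheduler starts, ISR-safe give when the caller cannot yield, and the plain
 * or recursive give otherwise. */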
static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
{
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return;
    }
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    assert(h);
    if (!xPortCanYield()) {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort();
        }
        BaseType_t higher_task_woken = false;
        xSemaphoreGiveFromISR(h, &higher_task_woken);
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            xSemaphoreGiveRecursive(h);
        } else {
            xSemaphoreGive(h);
        }
    }
}
void IRAM_ATTR _lock_release(_lock_t *lock)
{
    lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
{
    lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
/* ... */
/* ... */
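/* The StaticSemaphore_t objects defined below are handed out to newlib as
 * struct __lock instances, so struct __lock must be large enough to hold a
 * StaticSemaphore_t, and FreeRTOS static allocation must be available. */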
_Static_assert(sizeof(struct __lock) >= sizeof(StaticSemaphore_t),
               "Incorrect size of struct __lock");
_Static_assert(configSUPPORT_STATIC_ALLOCATION,
               "FreeRTOS should be configured with static allocation support");
/* ... */
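/* Two statically allocated mutexes shared by newlib's preallocated lock
 * objects (see the alias declarations and esp_newlib_locks_init() below). */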
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;
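/* On targets whose ROM copy of newlib uses retargetable locking, the ROM's
 * common lock pointers are set to ROM_MUTEX_MAGIC at startup (see
 * esp_newlib_locks_init() below); MAYBE_OVERRIDE_LOCK then redirects any lock
 * that dereferences to that magic value to one of the static mutexes above. */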
#if ESP_ROM_HAS_RETARGETABLE_LOCKING
/* ... */
#define ROM_NEEDS_MUTEX_OVERRIDE /* ... */
#endif

#ifdef ROM_NEEDS_MUTEX_OVERRIDE
#define ROM_MUTEX_MAGIC 0xbb10c433
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) \
    if (*(int*)_lock == ROM_MUTEX_MAGIC) { \
        (_lock) = (_LOCK_T) (_lock_to_use_instead); \
    }
#else /* ... */
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif
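/* newlib retargetable locking API (__retarget_lock_*). These thin wrappers
 * forward to the _lock_* functions above; the acquire/try variants also apply
 * MAYBE_OVERRIDE_LOCK so that ROM "magic" locks end up using the common
 * static mutexes. */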
void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
    *lock = NULL;
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
    *lock = NULL;
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
    _lock_close(&lock);
}

void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
    _lock_close_recursive(&lock);
}

static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
    assert(lock != NULL && "Uninitialized lock used");
}
void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    _lock_acquire(&lock);
}

void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    _lock_acquire_recursive(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    return _lock_try_acquire(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    return _lock_try_acquire_recursive(&lock);
}

void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release(&lock);
}

void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release_recursive(&lock);
}
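/* Preallocated lock objects that newlib references by name; all of them alias
 * one of the two common static mutexes defined above. */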
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sinit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___malloc_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___env_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sfp_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___atexit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___at_quick_exit_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___tz_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___dd_hash_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___arc4random_mutex;
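/* Called early during startup to create the common mutexes and to point the
 * target-specific ROM/newlib lock objects at them. */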
void esp_newlib_locks_init(void)
{
    /* ... */
    SemaphoreHandle_t handle;
    handle = xSemaphoreCreateMutexStatic(&s_common_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_mutex);
    handle = xSemaphoreCreateRecursiveMutexStatic(&s_common_recursive_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_recursive_mutex);
    (void) handle;

    /* ... */
#ifdef CONFIG_IDF_TARGET_ESP32
    extern _lock_t __sfp_lock;
    __sfp_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sinit_lock;
    __sinit_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __env_lock_object;
    __env_lock_object = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __tz_lock_object;
    __tz_lock_object = (_lock_t) &s_common_mutex;
    /* ... */
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
    extern _lock_t __sinit_recursive_mutex;
    __sinit_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sfp_recursive_mutex;
    __sfp_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
    /* ... */
#elif ESP_ROM_HAS_RETARGETABLE_LOCKING
    /* ... */
    extern void esp_rom_newlib_init_common_mutexes(_LOCK_T, _LOCK_T);
    int magic_val = ROM_MUTEX_MAGIC;
    _LOCK_T magic_mutex = (_LOCK_T) &magic_val;
    esp_rom_newlib_init_common_mutexes(magic_mutex, magic_mutex);
    /* ... */
#else
#error Unsupported target
#endif
}