1
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
69
70
71
72
73
74
75
76
77
78
79
80
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
132
135
138
139
140
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
172
173
177
178
179
180
183
186
187
188
202
203
204
205
206
207
/* ... */
#include "sdkconfig.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_ipc.h"
#include "esp_private/esp_ipc_isr.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#define IPC_MAX_PRIORITY (configMAX_PRIORITIES - 1)
#if !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
#if CONFIG_COMPILER_OPTIMIZATION_NONE
#define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE + 0x100)
#else
#define IPC_STACK_SIZE (CONFIG_ESP_IPC_TASK_STACK_SIZE)
#endif
static DRAM_ATTR StaticSemaphore_t s_ipc_mutex_buffer[CONFIG_FREERTOS_NUMBER_OF_CORES];
static DRAM_ATTR StaticSemaphore_t s_ipc_ack_buffer[CONFIG_FREERTOS_NUMBER_OF_CORES];
static TaskHandle_t s_ipc_task_handle[CONFIG_FREERTOS_NUMBER_OF_CORES];
static SemaphoreHandle_t s_ipc_mutex[CONFIG_FREERTOS_NUMBER_OF_CORES];
static SemaphoreHandle_t s_ipc_ack[CONFIG_FREERTOS_NUMBER_OF_CORES];
static volatile esp_ipc_func_t s_func[CONFIG_FREERTOS_NUMBER_OF_CORES] = { 0 };
static void * volatile s_func_arg[CONFIG_FREERTOS_NUMBER_OF_CORES];
typedef enum {
IPC_WAIT_NO = 0,
IPC_WAIT_FOR_START,
IPC_WAIT_FOR_END,
} esp_ipc_wait_t;
static esp_ipc_wait_t volatile s_wait_for[portNUM_PROCESSORS];
static volatile esp_ipc_func_t s_no_block_func[portNUM_PROCESSORS] = { 0 };
static volatile bool s_no_block_func_and_arg_are_ready[portNUM_PROCESSORS] = { 0 };
static void * volatile s_no_block_func_arg[portNUM_PROCESSORS];
/**
 * Per-core IPC service task.
 *
 * One instance is pinned to each core. It sleeps on a task notification and,
 * when woken, services up to two kinds of pending requests:
 *  - a non-blocking callback posted via esp_ipc_call_nonblocking(), and
 *  - (multi-core builds) a blocking request posted via esp_ipc_call_and_wait(),
 *    acknowledging the caller through the per-core ack semaphore either before
 *    (IPC_WAIT_FOR_START) or after (IPC_WAIT_FOR_END) running the callback.
 *
 * @param arg Core number this task is pinned to, passed as (void*).
 */
static void IRAM_ATTR ipc_task(void* arg)
{
    const int core_id = (int) arg;
    assert(core_id == xPortGetCoreID());
#ifdef CONFIG_ESP_IPC_ISR_ENABLE
    esp_ipc_isr_init();
#endif

    for (;;) {
        // Block until a caller posts work via a task notification.
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);

        // Non-blocking path: run the posted callback, then release the slot
        // (clearing s_no_block_func lets the next caller claim it).
        if (s_no_block_func_and_arg_are_ready[core_id] && s_no_block_func[core_id]) {
            (*s_no_block_func[core_id])(s_no_block_func_arg[core_id]);
            s_no_block_func_and_arg_are_ready[core_id] = false;
            s_no_block_func[core_id] = NULL;
        }

#ifndef CONFIG_FREERTOS_UNICORE
        // Blocking path: snapshot the request locally before clearing s_func,
        // so the slot is free while the callback is still running.
        if (s_func[core_id]) {
            esp_ipc_func_t user_func = s_func[core_id];
            void* user_arg = s_func_arg[core_id];
            esp_ipc_wait_t wait_mode = s_wait_for[core_id];
            SemaphoreHandle_t ack_sem = s_ipc_ack[core_id];
            s_func[core_id] = NULL;

            if (wait_mode == IPC_WAIT_FOR_START) {
                // Caller only waits for the callback to begin executing.
                xSemaphoreGive(ack_sem);
                (*user_func)(user_arg);
            } else if (wait_mode == IPC_WAIT_FOR_END) {
                // Caller waits until the callback has finished.
                (*user_func)(user_arg);
                xSemaphoreGive(ack_sem);
            } else {
                // Corrupted request state - fail loudly.
                abort();
            }
        }
#endif // !CONFIG_FREERTOS_UNICORE
    }

    // Unreachable: the service loop above never exits.
    vTaskDelete(NULL);
}
/* ... */
/**
 * One-time IPC subsystem setup, run automatically as a constructor before
 * app_main(). For each core it creates the statically-backed mutex and ack
 * semaphore, then spawns a pinned "ipcN" service task at maximum priority.
 */
static void esp_ipc_init(void) __attribute__((constructor));
static void esp_ipc_init(void)
{
    char task_name[] = "ipcX"; // trailing 'X' is replaced with the core number

    for (int core = 0; core < CONFIG_FREERTOS_NUMBER_OF_CORES; ++core) {
        task_name[3] = (char)('0' + core);
        s_ipc_mutex[core] = xSemaphoreCreateMutexStatic(&s_ipc_mutex_buffer[core]);
        s_ipc_ack[core] = xSemaphoreCreateBinaryStatic(&s_ipc_ack_buffer[core]);
        BaseType_t created = xTaskCreatePinnedToCore(ipc_task, task_name, IPC_STACK_SIZE, (void*) core,
                                                     IPC_MAX_PRIORITY, &s_ipc_task_handle[core], core);
        assert(created == pdTRUE);
        (void)created;
    }
}
/**
 * Common implementation of the blocking IPC entry points.
 *
 * Serializes callers (per-core mutex when CONFIG_ESP_IPC_USES_CALLERS_PRIORITY,
 * otherwise one global mutex), publishes the request to the target core's IPC
 * task, wakes it, and blocks on the ack semaphore until the task signals
 * either start or completion of the callback, per @p wait_for.
 *
 * @param cpu_id   Target core.
 * @param func     Callback to run on the target core.
 * @param arg      Argument passed through to @p func.
 * @param wait_for IPC_WAIT_FOR_START or IPC_WAIT_FOR_END.
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG for a bad core id,
 *         ESP_ERR_INVALID_STATE if IPC is not initialized or the scheduler
 *         is not running.
 */
static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
{
    if (cpu_id >= CONFIG_FREERTOS_NUMBER_OF_CORES) {
        return ESP_ERR_INVALID_ARG;
    }
    if (s_ipc_task_handle[cpu_id] == NULL || xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    // Boost the IPC task to at least the caller's priority before contending
    // for the mutex, so a lower-priority IPC task cannot stall the request.
    TaskHandle_t caller = xTaskGetCurrentTaskHandle();
    UBaseType_t caller_prio = uxTaskPriorityGet(caller);
    UBaseType_t ipc_prio = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
    if (ipc_prio < caller_prio) {
        vTaskPrioritySet(s_ipc_task_handle[cpu_id], caller_prio);
    }
    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
    // Now that we own the per-core mutex, pin the IPC task exactly to the
    // caller's priority for the duration of this request.
    vTaskPrioritySet(s_ipc_task_handle[cpu_id], caller_prio);
#else
    // Without the callers-priority option a single mutex serializes all
    // blocking IPC requests across cores.
    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
#endif

    // Publish the request. s_func is written last: the IPC task treats a
    // non-NULL s_func as "request ready", so arg and wait mode must be
    // in place first.
    s_func_arg[cpu_id] = arg;
    s_wait_for[cpu_id] = wait_for;
    s_func[cpu_id] = func;
    xTaskNotifyGive(s_ipc_task_handle[cpu_id]);

    // Wait for the IPC task to acknowledge (at start or end of the callback).
    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);

#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
    xSemaphoreGive(s_ipc_mutex[0]);
#endif
    return ESP_OK;
}
/**
 * Run @p func on @p cpu_id; returns as soon as the callback has STARTED
 * executing on the target core (it may still be running on return).
 */
esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_START);
}
/**
 * Run @p func on @p cpu_id and block until the callback has FINISHED
 * executing on the target core.
 */
esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END);
}
/**
 * Queue @p func to run on @p cpu_id without blocking the caller.
 *
 * Safe to call from an ISR. Only one non-blocking request per target core can
 * be pending at a time: the per-core slot is claimed with an atomic
 * compare-and-set, and ESP_FAIL is returned while a previous request has not
 * yet been consumed by the IPC task.
 *
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG for a bad core id or
 *         uninitialized IPC; ESP_ERR_INVALID_STATE when targeting the current
 *         core before the scheduler is running; ESP_FAIL if the slot is busy.
 */
esp_err_t esp_ipc_call_nonblocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    // Consistency fix: bounds-check against CONFIG_FREERTOS_NUMBER_OF_CORES,
    // matching the s_ipc_task_handle[] dimension and the blocking-call path
    // (previously this one check used portNUM_PROCESSORS).
    if (cpu_id >= CONFIG_FREERTOS_NUMBER_OF_CORES || s_ipc_task_handle[cpu_id] == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    if (cpu_id == xPortGetCoreID() && xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }

    // Atomically claim the slot: NULL -> func. Fails while a previous
    // non-blocking request is still pending on this core.
    if (esp_cpu_compare_and_set((volatile uint32_t *)&s_no_block_func[cpu_id], 0, (uint32_t)func)) {
        s_no_block_func_arg[cpu_id] = arg;
        // Set the ready flag only after the argument is in place; the IPC
        // task checks the flag before dereferencing func/arg.
        s_no_block_func_and_arg_are_ready[cpu_id] = true;
        if (xPortInIsrContext()) {
            vTaskNotifyGiveFromISR(s_ipc_task_handle[cpu_id], NULL);
        } else {
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
            // Make sure the IPC task can preempt the caller promptly.
            vTaskPrioritySet(s_ipc_task_handle[cpu_id], IPC_MAX_PRIORITY);
#endif
            xTaskNotifyGive(s_ipc_task_handle[cpu_id]);
        }
        return ESP_OK;
    }
    return ESP_FAIL;
}
#endif // !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)