1
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
102
103
104
105
106
107
108
109
110
111
112
113
114
115
131
132
133
134
135
136
137
138
147
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
191
192
203
204
205
206
207
213
217
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
237
238
239
240
241
242
243
244
245
246
247
248
251
252
253
254
255
256
257
266
267
268
271
272
273
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
299
300
301
302
303
304
305
306
307
308
309
310
313
316
317
318
319
323
324
325
326
327
328
329
330
331
332
333
334
338
339
340
341
342
343
344
345
346
347
348
349
350
351
354
355
356
357
358
359
360
361
362
365
366
367
370
371
372
373
378
379
380
383
384
385
386
387
388
389
390
391
392
393
395
396
397
398
399
400
404
405
412
413
414
415
416
417
418
419
420
421
422
423
424
425
428
429
430
431
432
433
434
435
436
437
438
439
440
441
444
445
446
449
450
451
454
455
456
457
458
459
460
461
462
468
469
475
476
479
480
481
482
483
484
492
495
496
497
502
503
513
514
515
516
520
524
525
526
527
528
529
530
531
537
538
542
543
547
548
552
553
560
561
564
565
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
589
590
591
592
593
594
595
596
600
601
602
603
604
607
608
609
610
611
612
613
614
615
616
619
620
621
622
623
626
627
628
629
630
631
632
633
634
637
640
641
642
643
644
645
646
647
648
649
652
653
656
657
658
659
660
661
662
663
664
665
666
667
685
686
689
690
691
692
695
697
698
702
703
707
708
709
722
723
724
725
726
727
728
729
730
731
736
737
738
739
740
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
768
769
770
771
772
773
774
775
776
777
795
796
797
798
799
800
803
804
805
806
807
808
809
810
811
812
813
814
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
837
838
839
842
843
/* ... */
/* ... */
#include <string.h>
#include "soc/soc_memory_layout.h"
#include "soc/gpio_periph.h"
#include "soc/soc_caps.h"
#include "soc/sdio_slave_periph.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "hal/sdio_slave_hal.h"
#include "hal/gpio_hal.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_private/periph_ctrl.h"
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
#include "esp_private/sleep_retention.h"
#endif
#include "driver/gpio.h"
#include "driver/sdio_slave.h"
#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
SDIO_SLAVE_LOGE("%s", str);\
return ret_val;\
}{...} }{...}while (0)...
static const char TAG[] = "sdio_slave";
#define SDIO_SLAVE_LOGE(s, ...) ESP_LOGE(TAG, "%s(%d): "s, __FUNCTION__,__LINE__,##__VA_ARGS__)
#define SDIO_SLAVE_LOGW(s, ...) ESP_LOGW(TAG, "%s: "s, __FUNCTION__,##__VA_ARGS__)
#if !SOC_RCC_IS_INDEPENDENT
#define SDIO_SLAVE_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define SDIO_SLAVE_RCC_ATOMIC()
#endif
// Descriptor for one registered receive buffer. The two anonymous structs share
// storage: while the buffer is loaded into the HAL chain, `hal_desc` and
// `not_receiving` are live; while it sits idle on context.recv_reg_list,
// `tail_entry` links it into that list.
// NOTE(review): `tail_entry` starts after two reserved words, presumably so it
// does not clobber the first fields of `hal_desc` — confirm against the layout
// of sdio_slave_hal_recv_desc_t.
typedef struct recv_desc_s {
    union {
        struct {
            // Must stay first so this struct can be cast to/from the HAL descriptor.
            sdio_slave_hal_recv_desc_t hal_desc;
            // Non-zero while the buffer is NOT loaded into the HAL (idle on reg list).
            uint32_t not_receiving;
        };
        struct {
            uint32_t _reserved0;
            uint32_t _reserved1;
            TAILQ_ENTRY(recv_desc_s) tail_entry;
        };
    };
} recv_desc_t;

typedef TAILQ_HEAD(recv_tailq_head_s, recv_desc_s) recv_tailq_t;
// Driver-global state for the (single) SDIO slave peripheral.
typedef struct {
    sdio_slave_config_t config;        // copy of the user configuration; zeroed == "not initialized"
    sdio_slave_context_t *hal;         // HAL context, heap-allocated in init_context()
    intr_handle_t intr_handle;         // allocated interrupt handle
    /*------- events ---------------*/
    union {
        SemaphoreHandle_t events[9];   // all event semaphores, iterated as one array
        struct {
            SemaphoreHandle_t _events[8];   // host general-purpose interrupts 0-7 (binary)
            SemaphoreHandle_t recv_event;   // counts finished receive descriptors
        };
    };
    portMUX_TYPE reg_spinlock;         // guards shared-register writes
    /*------- sending ---------------*/
    SemaphoreHandle_t remain_cnt;      // counts free slots in the send queue
    portMUX_TYPE write_spinlock;       // guards queueing into the HAL send ring
    QueueHandle_t ret_queue;           // finished-send `arg`s returned to the app
    /*------- receiving ---------------*/
    recv_tailq_t recv_reg_list;        // registered-but-not-loaded receive buffers
    portMUX_TYPE recv_spinlock;        // guards recv list and HAL receive state
} sdio_context_t;
// Static initializer for `context`: handles NULL, spinlocks unlocked. The list
// initializer references the global `context` defined immediately below.
#define CONTEXT_INIT_VAL { \
        .intr_handle = NULL, \
        .hal = NULL, \
        \
        .events = {}, \
        .reg_spinlock = portMUX_INITIALIZER_UNLOCKED, \
        \
        .ret_queue = NULL, \
        .write_spinlock = portMUX_INITIALIZER_UNLOCKED, \
        \
        .recv_reg_list = TAILQ_HEAD_INITIALIZER(context.recv_reg_list), \
        .recv_spinlock = portMUX_INITIALIZER_UNLOCKED, \
    }

static sdio_context_t context = CONTEXT_INIT_VAL;
// Interrupt dispatcher and per-direction handlers (run in ISR context).
static void sdio_intr(void *);
static void sdio_intr_host(void *);
static void sdio_intr_send(void *);
static void sdio_intr_recv(void *);
// Flush helpers used by sdio_slave_reset().
static esp_err_t send_flush_data(void);
static esp_err_t recv_flush_data(void);
// Critical sections guarding receive state.
static inline void critical_enter_recv(void);
static inline void critical_exit_recv(void);
static void deinit_context(void);
// Debug helper: print one DMA linked-list descriptor (sizes, flags, chain link).
static inline void show_ll(sdio_slave_ll_desc_t *item)
{
    ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
    ESP_EARLY_LOGI(TAG, " buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}
// Debug helper: walk a descriptor chain from `queue`, printing each node and
// the total count. Unused in normal builds.
static void __attribute((unused)) dump_ll(sdio_slave_ll_desc_t *queue)
{
    int total = 0;
    for (sdio_slave_ll_desc_t *cur = queue; cur != NULL; cur = STAILQ_NEXT(cur, qe)) {
        total++;
        show_ll(cur);
    }
    ESP_EARLY_LOGI(TAG, "total: %d", total);
}
// Free every resource owned by `context` and reset it for re-initialization.
// Must be safe on a partially-initialized context: it is the error-path cleanup
// for init_context() as well as the normal teardown.
static inline void deinit_context(void)
{
    context.config = (sdio_slave_config_t) {};
    for (int i = 0; i < 9; i++) {
        if (context.events[i] != NULL) {
            vSemaphoreDelete(context.events[i]);
            context.events[i] = NULL;
        }
    }
    if (context.ret_queue != NULL) {
        vQueueDelete(context.ret_queue);
        context.ret_queue = NULL;
    }
    if (context.remain_cnt != NULL) {
        vSemaphoreDelete(context.remain_cnt);
        // Fix: clear the handle so a repeated deinit cannot double-delete it.
        context.remain_cnt = NULL;
    }
    // Fix: `hal` is still NULL when its allocation failed in init_context();
    // guard before dereferencing to free the send descriptor ring.
    if (context.hal != NULL) {
        free(context.hal->send_desc_queue.data);
        context.hal->send_desc_queue.data = NULL;
        free(context.hal);
        context.hal = NULL;
    }
}
// Build the driver context: allocate the HAL context and its send-descriptor
// ring, create the event semaphores, the send-slot counting semaphore and the
// finished-send return queue. On any failure, releases everything allocated so
// far via deinit_context() and returns ESP_ERR_NO_MEM.
static esp_err_t init_context(const sdio_slave_config_t *config)
{
    // A zeroed config (first word checked) marks "not yet initialized".
    SDIO_SLAVE_CHECK(*(uint32_t *)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE);
    context = (sdio_context_t)CONTEXT_INIT_VAL;
    context.config = *config;

    // HAL context must live in internal, byte-accessible RAM.
    context.hal = (sdio_slave_context_t *)heap_caps_calloc(sizeof(sdio_slave_context_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (context.hal == NULL) {
        goto no_mem;
    }
    context.hal->sending_mode = config->sending_mode;
    context.hal->timing = config->timing;
    context.hal->no_highspeed = (config->flags & SDIO_SLAVE_FLAG_DEFAULT_SPEED) == SDIO_SLAVE_FLAG_DEFAULT_SPEED;
    context.hal->send_queue_size = config->send_queue_size;
    context.hal->recv_buffer_size = config->recv_buffer_size;

    // Send descriptor ring: one extra slot; DMA-capable memory because the
    // hardware reads descriptors directly.
    sdio_ringbuf_t *buf = &(context.hal->send_desc_queue);
    buf->size = SDIO_SLAVE_SEND_DESC_SIZE * (config->send_queue_size + 1);
    buf->data = (uint8_t *)heap_caps_malloc(buf->size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT | MALLOC_CAP_DMA);
    if (buf->data == NULL) {
        goto no_mem;
    }
    sdio_slave_hal_init(context.hal);

    // events[8] aliases recv_event (counting); events[0..7] are binary.
    context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0);
    for (int i = 0; i < 9; i++) {
        if (i < 8) {
            context.events[i] = xSemaphoreCreateBinary();
        }
        if (context.events[i] == NULL) {
            SDIO_SLAVE_LOGE("event initialize failed");
            goto no_mem;
        }
    }
    context.remain_cnt = xSemaphoreCreateCounting(context.config.send_queue_size, context.config.send_queue_size);
    if (context.remain_cnt == NULL) {
        goto no_mem;
    }
    context.ret_queue = xQueueCreate(config->send_queue_size, sizeof(void *));
    if (context.ret_queue == NULL) {
        goto no_mem;
    }
    return ESP_OK;

no_mem:
    // NOTE(review): deinit_context() frees through context.hal; if the very
    // first allocation above failed, hal is NULL here — verify deinit_context
    // tolerates that.
    deinit_context();
    return ESP_ERR_NO_MEM;
}
/**
 * Route one GPIO to the SDIO slave peripheral through the IO MUX.
 *
 * @param pin     GPIO number (asserted valid).
 * @param func    IO MUX function index for the SDIO slave signal.
 * @param pullup  Enable the internal pull-up on this pin.
 */
static void configure_pin(int pin, uint32_t func, bool pullup)
{
    const int drive_strength = 3;   // strongest drive
    assert(pin != -1);
    uint32_t mux_reg = GPIO_PIN_MUX_REG[pin];
    assert(mux_reg != UINT32_MAX);

    PIN_INPUT_ENABLE(mux_reg);
    gpio_hal_iomux_func_sel(mux_reg, (int)func);
    PIN_SET_DRV(mux_reg, drive_strength);
    gpio_pulldown_dis(pin);
    if (pullup) {
        gpio_pullup_en(pin);
    }
}
// Configure the slot pins, enable/reset the peripheral clock, and initialize
// the peripheral registers via the HAL. Always returns ESP_OK.
static inline esp_err_t sdio_slave_hw_init(sdio_slave_config_t *config)
{
    const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[0];
    bool pullup = (config->flags & SDIO_SLAVE_FLAG_INTERNAL_PULLUP) != 0;

    // CLK never takes a pull-up; the command/data lines follow the user's choice.
    configure_pin(slot->clk_gpio, slot->func, false);
    configure_pin(slot->cmd_gpio, slot->func, pullup);
    configure_pin(slot->d0_gpio, slot->func, pullup);
    // D1 doubles as the interrupt line: skip only when host interrupts are disabled.
    if (!(config->flags & SDIO_SLAVE_FLAG_HOST_INTR_DISABLED)) {
        configure_pin(slot->d1_gpio, slot->func, pullup);
    }
    if (!(config->flags & SDIO_SLAVE_FLAG_DAT2_DISABLED)) {
        configure_pin(slot->d2_gpio, slot->func, pullup);
    }
    configure_pin(slot->d3_gpio, slot->func, pullup);

    SDIO_SLAVE_RCC_ATOMIC() {
        sdio_slave_ll_enable_bus_clock(true);
        sdio_slave_ll_reset_register();
    }
    sdio_slave_hal_hw_init(context.hal);
    return ESP_OK;
}
// Return a pin to plain GPIO input, but only if it is still muxed to the SDIO
// function (don't disturb pins the application has since repurposed).
static void recover_pin(int pin, int sdio_func)
{
    uint32_t mux_reg = GPIO_PIN_MUX_REG[pin];
    assert(mux_reg != UINT32_MAX);

    if (REG_GET_FIELD(mux_reg, MCU_SEL) == sdio_func) {
        gpio_set_direction(pin, GPIO_MODE_INPUT);
        gpio_hal_iomux_func_sel(mux_reg, PIN_FUNC_GPIO);
    }
}
// Release every slot pin back to GPIO and gate the peripheral bus clock.
static void sdio_slave_hw_deinit(void)
{
    const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[0];
    const int pins[] = {
        slot->clk_gpio, slot->cmd_gpio, slot->d0_gpio,
        slot->d1_gpio, slot->d2_gpio, slot->d3_gpio,
    };
    for (size_t i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
        recover_pin(pins[i], slot->func);
    }
    SDIO_SLAVE_RCC_ATOMIC() {
        sdio_slave_ll_enable_bus_clock(false);
    }
}
/**
 * Initialize the SDIO slave driver: allocate the interrupt, build the driver
 * context, optionally acquire the sleep-retention power lock, bring up the
 * hardware, and finish with sdio_slave_reset() so the peripheral starts clean.
 *
 * @param config  Driver configuration (copied into the context).
 * @return ESP_OK on success; the failing step's error otherwise.
 */
esp_err_t sdio_slave_initialize(sdio_slave_config_t *config)
{
    esp_err_t r;
    intr_handle_t intr_handle = NULL;
    const int flags = 0;
    r = esp_intr_alloc(ETS_SLC0_INTR_SOURCE, flags, sdio_intr, NULL, &intr_handle);
    if (r != ESP_OK) {
        return r;
    }
    r = init_context(config);
    if (r != ESP_OK) {
        // Fix: don't leak the interrupt allocated above when context setup fails.
        esp_intr_free(intr_handle);
        return r;
    }
    context.intr_handle = intr_handle;
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
    r = sleep_retention_power_lock_acquire();
    if (r != ESP_OK) {
        goto err;
    }
#endif
    r = sdio_slave_hw_init(config);
    if (r != ESP_OK) {
        goto err;
    }
    sdio_slave_reset();
    return ESP_OK;
err:
    // Fix: roll back the interrupt and context on late failures instead of
    // leaking them and leaving the driver half-initialized.
    esp_intr_free(context.intr_handle);
    context.intr_handle = NULL;
    deinit_context();
    return r;
}
// Tear down the driver: release hardware/pins, free every registered or
// still-loaded receive descriptor, free the interrupt, then free the context.
// NOTE(review): the receive buffers' payload memory is owned by the caller and
// is not freed here — only the descriptors are.
void sdio_slave_deinit(void)
{
    sdio_slave_hw_deinit();
#if CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
    esp_err_t r = sleep_retention_power_lock_release();
    assert(r == ESP_OK);
#endif
    recv_desc_t *temp_desc;
    recv_desc_t *desc;
    // Free descriptors sitting idle on the registered list.
    TAILQ_FOREACH_SAFE(desc, &context.recv_reg_list, tail_entry, temp_desc) {
        TAILQ_REMOVE(&context.recv_reg_list, desc, tail_entry);
        free(desc);
    }
    // Free descriptors still loaded in the HAL receive chain.
    while (1) {
        desc = (recv_desc_t *)sdio_slave_hal_recv_unload_desc(context.hal);
        if (desc == NULL) {
            break;
        }
        free(desc);
    }
    esp_err_t ret = esp_intr_free(context.intr_handle);
    assert(ret == ESP_OK);
    (void)ret;
    context.intr_handle = NULL;
    deinit_context();
}
// Start the transaction machinery: clear any stale host interrupts, start the
// send and receive state machines, then raise IOREADY so the host may begin.
esp_err_t sdio_slave_start(void)
{
    sdio_slave_hostint_t all_ints = (sdio_slave_hostint_t)UINT32_MAX;
    sdio_slave_hal_hostint_clear(context.hal, &all_ints);

    esp_err_t err = sdio_slave_hal_send_start(context.hal);
    if (err != ESP_OK) {
        return err;
    }

    critical_enter_recv();
    sdio_slave_hal_recv_start(context.hal);
    critical_exit_recv();

    sdio_slave_hal_set_ioready(context.hal, true);
    return ESP_OK;
}
// Flush both directions and reset the send/receive packet counters.
// Returns the first error encountered, ESP_OK otherwise.
esp_err_t sdio_slave_reset(void)
{
    esp_err_t err = send_flush_data();
    if (err != ESP_OK) {
        return err;
    }
    err = sdio_slave_hal_send_reset_counter(context.hal);
    if (err != ESP_OK) {
        return err;
    }
    err = recv_flush_data();
    if (err != ESP_OK) {
        return err;
    }
    critical_enter_recv();
    sdio_slave_hal_recv_reset_counter(context.hal);
    critical_exit_recv();
    return ESP_OK;
}
// Stop the peripheral: drop IOREADY first so the host stops issuing
// transactions, then halt the send and receive state machines.
void sdio_slave_stop(void)
{
    sdio_slave_hal_set_ioready(context.hal, false);
    sdio_slave_hal_send_stop(context.hal);
    sdio_slave_hal_recv_stop(context.hal);
}
// Top-level ISR: fan out to the send, receive and host-interrupt handlers.
static void sdio_intr(void *arg)
{
    sdio_intr_send(arg);
    sdio_intr_recv(arg);
    sdio_intr_host(arg);
}
/* ... */
// Handle general-purpose interrupts raised by the host: fetch-and-clear the
// pending bits, invoke the user callback per bit, and wake any task blocked
// in sdio_slave_wait_int().
static void sdio_intr_host(void *arg)
{
    sdio_slave_ll_slvint_t int_val;
    sdio_slave_hal_slvint_fetch_clear(context.hal, &int_val);

    BaseType_t higher_prio_woken = pdFALSE;
    for (int bit = 0; bit < 8; bit++) {
        if ((int_val & BIT(bit)) == 0) {
            continue;
        }
        if (context.config.event_cb != NULL) {
            (*context.config.event_cb)(bit);
        }
        xSemaphoreGiveFromISR(context.events[bit], &higher_prio_woken);
    }
    if (higher_prio_woken) {
        portYIELD_FROM_ISR();
    }
}
// Block until host general-purpose interrupt `pos` (0-7) fires, or `wait` expires.
// NOTE(review): on success this returns the raw xSemaphoreTake() result
// (pdTRUE == 1, not ESP_OK == 0) and pdFALSE (== 0 == ESP_OK) on timeout —
// callers must compare against pdTRUE. Left unchanged for backward
// compatibility; confirm this is the intended contract.
esp_err_t sdio_slave_wait_int(int pos, TickType_t wait)
{
    SDIO_SLAVE_CHECK(pos >= 0 && pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
    return xSemaphoreTake(context.events[pos], wait);
}
/**
 * Read a shared general-purpose register visible to the host.
 *
 * @param pos  Register offset 0-63. Offsets 28-31 are interrupt registers:
 *             reading them is allowed, but only for reference.
 * @return Register value, or 0 when `pos` is out of range.
 */
uint8_t sdio_slave_read_reg(int pos)
{
    if (pos >= 28 && pos <= 31) {
        SDIO_SLAVE_LOGW("%s: interrupt reg, for reference", __FUNCTION__);
    }
    if (pos < 0 || pos >= 64) {
        SDIO_SLAVE_LOGE("read register address wrong");
        // Fix: previously the invalid offset was still passed to the HAL,
        // causing an out-of-range register access. Bail out instead.
        return 0;
    }
    return sdio_slave_hal_host_get_reg(context.hal, pos);
}
// Write a shared general-purpose register. Offsets 28-31 belong to the
// interrupt registers and must be changed via sdio_slave_clear_int() instead.
esp_err_t sdio_slave_write_reg(int pos, uint8_t reg)
{
    if (pos >= 28 && pos <= 31) {
        SDIO_SLAVE_LOGE("interrupt reg, please use sdio_slave_clear_int");
        return ESP_ERR_INVALID_ARG;
    }
    if (pos < 0 || pos >= 64) {
        SDIO_SLAVE_LOGE("write register address wrong");
        return ESP_ERR_INVALID_ARG;
    }

    // Registers are shared with other tasks/ISRs; serialize the write.
    portENTER_CRITICAL(&context.reg_spinlock);
    sdio_slave_hal_host_set_reg(context.hal, pos, reg);
    portEXIT_CRITICAL(&context.reg_spinlock);
    return ESP_OK;
}
// Return the set of slave-to-host interrupts currently enabled.
sdio_slave_hostint_t sdio_slave_get_host_intena(void)
{
    sdio_slave_hostint_t enabled;
    sdio_slave_hal_hostint_get_ena(context.hal, &enabled);
    return enabled;
}
// Enable exactly the slave-to-host interrupts given in `mask`.
void sdio_slave_set_host_intena(sdio_slave_hostint_t mask)
{
    sdio_slave_hal_hostint_set_ena(context.hal, &mask);
}
// Clear the pending slave-to-host interrupt bits given in `mask`.
void sdio_slave_clear_host_int(sdio_slave_hostint_t mask)
{
    sdio_slave_hal_hostint_clear(context.hal, &mask);
}
// Map a host-interrupt slot number (0-7) to its single-bit mask.
static inline sdio_slave_hostint_t get_hostint_by_pos(int pos)
{
    return (sdio_slave_hostint_t)BIT(pos);
}
// Raise general-purpose interrupt `pos` (0-7) towards the host.
esp_err_t sdio_slave_send_host_int(uint8_t pos)
{
    SDIO_SLAVE_CHECK(pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
    sdio_slave_hostint_t intr = get_hostint_by_pos(pos);
    sdio_slave_hal_hostint_send(context.hal, &intr);
    return ESP_OK;
}
/* ... */
/* ... */
// Send-direction ISR: on an EOF event, collect every descriptor the host has
// finished reading, push their `arg`s to the return queue, replenish the
// send-slot semaphore, then load the next queued packet into hardware.
static void sdio_intr_send(void *arg)
{
    ESP_EARLY_LOGV(TAG, "intr_send");
    BaseType_t yield = pdFALSE;

    // Update HAL bookkeeping for this interrupt first.
    sdio_slave_hal_send_handle_isr_invoke(context.hal);

    uint32_t returned_cnt;  // written by the HAL; consumed after the drain loop
    if (sdio_slave_hal_send_eof_happened(context.hal)) {
        BaseType_t ret __attribute__((unused));
        esp_err_t err;
        // Drain finished buffers. While descriptors remain, the HAL appears to
        // report returned_cnt == 0 (asserted); the terminating (non-OK) call
        // leaves the total slot count in returned_cnt — confirm with HAL docs.
        while (1) {
            void *finished_arg;
            err = sdio_slave_hal_send_get_next_finished_arg(context.hal, &finished_arg, &returned_cnt);
            if (err != ESP_OK) {
                break;
            }
            assert(returned_cnt == 0);
            ESP_EARLY_LOGV(TAG, "end: %p", finished_arg);
            ret = xQueueSendFromISR(context.ret_queue, &finished_arg, &yield);
            assert(ret == pdTRUE);
        }
        // Give back one sending-queue slot per returned buffer.
        for (size_t i = 0; i < returned_cnt; i++) {
            ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield);
            assert(ret == pdTRUE);
        }
    }
    sdio_slave_hal_send_new_packet_if_exist(context.hal);

    if (yield) {
        portYIELD_FROM_ISR();
    }
}
/**
 * Queue a buffer for sending to the host.
 *
 * @param addr  Buffer to send; must be DMA-capable and 32-bit aligned.
 * @param len   Length in bytes, must be > 0.
 * @param arg   User argument handed back by sdio_slave_send_get_finished().
 * @param wait  Ticks to wait for a free slot in the send queue.
 * @return ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_TIMEOUT, or the HAL's error.
 */
esp_err_t sdio_slave_send_queue(uint8_t *addr, size_t len, void *arg, TickType_t wait)
{
    SDIO_SLAVE_CHECK(len > 0, "len <= 0", ESP_ERR_INVALID_ARG);
    SDIO_SLAVE_CHECK(esp_ptr_dma_capable(addr) && (uint32_t)addr % 4 == 0, "buffer to send should be DMA capable and 32-bit aligned",
                     ESP_ERR_INVALID_ARG);

    // Reserve one slot in the send queue (bounded by config.send_queue_size).
    BaseType_t cnt_ret = xSemaphoreTake(context.remain_cnt, wait);
    if (cnt_ret != pdTRUE) {
        return ESP_ERR_TIMEOUT;
    }

    portENTER_CRITICAL(&context.write_spinlock);
    esp_err_t ret = sdio_slave_hal_send_queue(context.hal, addr, len, arg);
    portEXIT_CRITICAL(&context.write_spinlock);
    if (ret != ESP_OK) {
        // Fix: return the reserved slot on failure; previously the counting
        // semaphore leaked one slot per failed call, shrinking the queue.
        xSemaphoreGive(context.remain_cnt);
        return ret;
    }
    return ESP_OK;
}
// Wait up to `wait` ticks for a finished send and hand back its user `arg`.
// On timeout, *out_arg (if provided) is set to NULL and ESP_ERR_TIMEOUT returned.
esp_err_t sdio_slave_send_get_finished(void **out_arg, TickType_t wait)
{
    void *finished_arg = NULL;
    BaseType_t got = xQueueReceive(context.ret_queue, &finished_arg, wait);
    if (out_arg != NULL) {
        *out_arg = finished_arg;
    }
    return (got == pdTRUE) ? ESP_OK : ESP_ERR_TIMEOUT;
}
// Synchronous send: queue the buffer tagged with a CPU-cycle timestamp, block
// until a send finishes, and verify the very same transfer came back (i.e. no
// stale finished entry from a previous unbalanced queue call).
esp_err_t sdio_slave_transmit(uint8_t *addr, size_t len)
{
    uint32_t timestamp = esp_cpu_get_cycle_count();

    esp_err_t err = sdio_slave_send_queue(addr, len, (void *)timestamp, portMAX_DELAY);
    if (err != ESP_OK) {
        return err;
    }

    uint32_t ret_stamp;
    err = sdio_slave_send_get_finished((void **)&ret_stamp, portMAX_DELAY);
    if (err != ESP_OK) {
        return err;
    }
    SDIO_SLAVE_CHECK(ret_stamp == timestamp, "already sent without return before", ESP_ERR_INVALID_STATE);
    return ESP_OK;
}
// Flush all buffers still queued for sending, pushing their `arg`s to the
// return queue as if they had completed. Must run with transmission stopped:
// ESP_ERR_INVALID_STATE from the HAL means it was not. ESP_ERR_NOT_FOUND
// marks the clean end of the queue and is mapped to ESP_OK.
static esp_err_t send_flush_data(void)
{
    esp_err_t err;
    BaseType_t ret __attribute__((unused));
    while (1) {
        void *finished_arg;
        uint32_t return_cnt = 0;
        err = sdio_slave_hal_send_flush_next_buffer(context.hal, &finished_arg, &return_cnt);
        if (err == ESP_OK) {
            ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY);
            assert(ret == pdTRUE);
            // Give back one send-queue slot per returned buffer.
            for (size_t i = 0; i < return_cnt; i++) {
                ret = xSemaphoreGive(context.remain_cnt);
                assert(ret == pdTRUE);
            }
        } else {
            if (err == ESP_ERR_NOT_FOUND) {
                // Queue fully drained: not an error.
                err = ESP_OK;
            }
            break;
        }
    }

    if (err == ESP_ERR_INVALID_STATE) {
        ESP_LOGE(TAG, "flush data when transmission started");
    }
    return err;
}
/* ... */
// Return ESP_ERR_INVALID_ARG from the CALLER unless `desc` is a valid handle
// that is currently idle (registered, not loaded for receiving).
// Fix: parenthesize the macro argument so non-trivial expressions expand safely.
#define CHECK_HANDLE_IDLE(desc) do { if ((desc) == NULL || !(desc)->not_receiving) {\
            return ESP_ERR_INVALID_ARG; } } while(0)
// Enter the critical section protecting the receive list and HAL receive state.
static inline void critical_enter_recv(void)
{
    portENTER_CRITICAL(&context.recv_spinlock);
}
// Leave the critical section taken by critical_enter_recv().
static inline void critical_exit_recv(void)
{
    portEXIT_CRITICAL(&context.recv_spinlock);
}
static esp_err_t recv_flush_data(void)
{
while (1) {
BaseType_t ret = xSemaphoreTake(context.recv_event, 0);
if (ret == pdFALSE) {
break;
}{...}
critical_enter_recv();
sdio_slave_hal_recv_flush_one_buffer(context.hal);
critical_exit_recv();
}{...}
return ESP_OK;
}{ ... }
// Receive-direction ISR: for every descriptor the hardware has finished,
// give recv_event once so a task blocked in sdio_slave_recv_packet() can run.
static void sdio_intr_recv(void *arg)
{
    BaseType_t yield = 0;
    bool triggered = sdio_slave_hal_recv_done(context.hal);
    while (triggered) {
        portENTER_CRITICAL_ISR(&context.recv_spinlock);
        // Advance to the next finished descriptor, if any.
        bool has_next_item = sdio_slave_hal_recv_has_next_item(context.hal);
        portEXIT_CRITICAL_ISR(&context.recv_spinlock);
        if (has_next_item) {
            ESP_EARLY_LOGV(TAG, "intr_recv: Give");
            xSemaphoreGiveFromISR(context.recv_event, &yield);
            continue;   // keep draining without re-reading the interrupt flag
        }
        // Nothing left in the chain: re-check the interrupt in case another
        // buffer completed while we were draining.
        triggered = sdio_slave_hal_recv_done(context.hal);
    }
    if (yield) {
        portYIELD_FROM_ISR();
    }
}
// Hand an idle registered buffer back to the hardware for receiving.
// The handle must currently be idle on recv_reg_list (CHECK_HANDLE_IDLE).
esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle)
{
    recv_desc_t *desc = (recv_desc_t *)handle;
    CHECK_HANDLE_IDLE(desc);
    assert(desc->not_receiving);

    critical_enter_recv();
    // Order matters: tail_entry shares storage with the HAL descriptor via the
    // union, so unlink from the list before the descriptor is reused/loaded.
    TAILQ_REMOVE(&context.recv_reg_list, desc, tail_entry);
    desc->not_receiving = 0;
    sdio_slave_hal_load_buf(context.hal, &desc->hal_desc);
    critical_exit_recv();
    return ESP_OK;
}
// Allocate a descriptor for `start` and register it with the driver. The
// buffer starts idle on recv_reg_list until sdio_slave_recv_load_buf() is
// called. Returns NULL on invalid buffer or allocation failure.
sdio_slave_buf_handle_t sdio_slave_recv_register_buf(uint8_t *start)
{
    SDIO_SLAVE_CHECK(esp_ptr_dma_capable(start) && (uint32_t)start % 4 == 0,
                     "buffer to register should be DMA capable and 32-bit aligned", NULL);

    // The descriptor itself is read by the DMA engine, so it must be DMA-capable.
    recv_desc_t *desc = (recv_desc_t *)heap_caps_malloc(sizeof(recv_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT | MALLOC_CAP_DMA);
    if (desc == NULL) {
        SDIO_SLAVE_LOGE("cannot allocate lldesc for new buffer");
        return NULL;
    }

    sdio_slave_hal_recv_init_desc(context.hal, &desc->hal_desc, start);
    critical_enter_recv();
    TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, tail_entry);
    critical_exit_recv();
    return desc;
}
// Convenience wrapper over sdio_slave_recv_packet(): also returns the buffer
// address and received length, and treats a partial packet
// (ESP_ERR_NOT_FINISHED) the same as a complete one.
esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t *handle_ret, uint8_t **out_addr, size_t *out_len, TickType_t wait)
{
    esp_err_t ret = sdio_slave_recv_packet(handle_ret, wait);
    if (ret == ESP_ERR_NOT_FINISHED) {
        ret = ESP_OK;
    }
    if (ret != ESP_OK) {
        return ret;
    }

    recv_desc_t *desc = (recv_desc_t *)(*handle_ret);
    if (out_addr != NULL) {
        *out_addr = (uint8_t *)desc->hal_desc.buf;
    }
    if (out_len != NULL) {
        *out_len = desc->hal_desc.length;
    }
    return ret;
}
// Take one finished receive buffer: block on recv_event, unload the finished
// descriptor from the HAL chain and move it back onto the registered list.
// Returns ESP_ERR_NOT_FINISHED when this buffer is not the last of a packet
// (more buffers belonging to the same packet follow), ESP_ERR_TIMEOUT when
// nothing finished within `wait`.
esp_err_t sdio_slave_recv_packet(sdio_slave_buf_handle_t *handle_ret, TickType_t wait)
{
    SDIO_SLAVE_CHECK(handle_ret != NULL, "handle address cannot be 0", ESP_ERR_INVALID_ARG);
    // The receive ISR gives recv_event once per finished descriptor.
    BaseType_t err = xSemaphoreTake(context.recv_event, wait);
    if (err == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }

    esp_err_t ret = ESP_OK;
    critical_enter_recv();
    // Remove from the HAL queue, add back to the registered list.
    recv_desc_t *desc = (recv_desc_t *)sdio_slave_hal_recv_unload_desc(context.hal);
    // The event count guarantees a descriptor exists; owner==0 means the DMA released it.
    assert(desc != NULL && desc->hal_desc.owner == 0);
    TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, tail_entry);
    critical_exit_recv();
    *handle_ret = (sdio_slave_buf_handle_t)desc;
    if (!desc->hal_desc.eof) {
        ret = ESP_ERR_NOT_FINISHED;
    }
    return ret;
}
// Unregister an idle buffer and free its descriptor. The buffer must not be
// loaded into the hardware (enforced by CHECK_HANDLE_IDLE). The payload
// memory itself stays owned by the caller.
esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle)
{
    recv_desc_t *desc = (recv_desc_t *)handle;
    CHECK_HANDLE_IDLE(desc);

    critical_enter_recv();
    TAILQ_REMOVE(&context.recv_reg_list, desc, tail_entry);
    critical_exit_recv();
    free(desc);
    return ESP_OK;
}
// Return the data pointer of a receive buffer, optionally writing its received
// length to *len_o. Returns NULL for a NULL handle.
uint8_t *sdio_slave_recv_get_buf(sdio_slave_buf_handle_t handle, size_t *len_o)
{
    recv_desc_t *desc = (recv_desc_t *)handle;
    if (desc == NULL) {
        return NULL;
    }
    if (len_o != NULL) {
        *len_o = desc->hal_desc.length;
    }
    return (uint8_t *)desc->hal_desc.buf;
}