1
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
42
48
49
50
51
52
53
54
55
56
57
58
59
61
62
63
64
65
66
67
68
76
77
78
85
86
89
90
91
99
100
104
105
109
110
111
112
113
114
126
127
128
129
130
131
132
133
134
135
136
144
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
195
196
197
198
199
200
201
202
203
204
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
227
228
229
233
234
235
239
240
245
246
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
272
273
274
277
278
279
280
281
282
283
284
285
286
287
288
291
292
293
294
305
306
307
308
309
310
311
312
313
314
315
316
319
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
357
361
362
363
364
373
374
375
376
377
378
379
380
381
382
385
388
391
396
397
398
399
400
401
402
403
406
407
410
411
414
415
418
419
422
423
424
425
436
437
438
439
440
441
444
448
455
456
457
458
459
460
464
468
472
476
480
481
482
492
493
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
527
528
529
530
531
532
536
539
542
543
564
565
566
569
573
577
578
579
582
583
584
585
590
591
592
593
594
595
596
597
600
603
606
609
612
615
616
620
621
622
623
629
630
631
632
638
639
640
641
642
643
644
645
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
682
683
684
685
686
687
688
689
690
691
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
714
715
716
717
734
735
736
737
742
743
749
752
753
754
755
756
757
758
759
764
765
766
767
768
769
770
771
772
773
774
778
779
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
827
828
829
830
831
832
833
834
835
836
839
840
843
844
845
846
847
848
852
853
857
858
861
862
863
864
870
871
872
873
874
883
884
885
889
890
891
897
898
899
902
903
904
905
906
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
943
944
952
953
954
955
958
959
960
961
962
963
969
970
971
972
983
984
985
986
987
988
989
990
996
997
998
999
1000
1001
1002
1003
1009
1010
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1039
1040
1041
1042
1043
1047
1048
1049
1050
1051
1052
1053
1054
1057
1058
1059
1060
1061
1062
1063
1064
1065
1071
/* ... */
#include <string.h>
#include "sdkconfig.h"
#include "stdatomic.h"
#include "esp_types.h"
#include "esp_attr.h"
#include "esp_check.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h"
#include "esp_heap_caps.h"
#include "soc/spi_periph.h"
#include "driver/spi_master.h"
#include "driver/gpio.h"
#include "esp_private/gpio.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/spi_common_internal.h"
#include "esp_private/spi_share_hw_ctrl.h"
#include "esp_private/esp_cache_private.h"
#include "esp_private/sleep_retention.h"
#include "esp_dma_utils.h"
#include "hal/spi_hal.h"
#include "hal/gpio_hal.h"
#if CONFIG_IDF_TARGET_ESP32
#include "soc/dport_reg.h"
#endif
static const char *SPI_TAG = "spi";
#define SPI_CHECK(a, str, ret_val) ESP_RETURN_ON_FALSE(a, ret_val, SPI_TAG, str)
#define SPI_CHECK_PIN(pin_num, pin_name, check_output) if (check_output) { \
SPI_CHECK(GPIO_IS_VALID_OUTPUT_GPIO(pin_num), pin_name" not valid", ESP_ERR_INVALID_ARG); \
}{...} else { \
SPI_CHECK(GPIO_IS_VALID_GPIO(pin_num), pin_name" not valid", ESP_ERR_INVALID_ARG); \
}{...}
...
#define SPI_MAIN_BUS_DEFAULT() { \
.host_id = 0, \
.bus_attr = { \
.max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE, \
}{...}, \
}{...}
#define FUNC_GPIO PIN_FUNC_GPIO
// Per-host bookkeeping for a claimed SPI bus.
typedef struct {
    int host_id;                        // SPI peripheral number (SPIx_HOST)
    _lock_t mutex;                      // guards sleep-retention init/free (see spi_bus_initialize/spi_bus_free)
    spi_destroy_func_t destroy_func;    // upper-layer teardown hook invoked by spi_bus_free()
    void* destroy_arg;                  // argument passed to destroy_func
    spi_bus_attr_t bus_attr;            // public attributes: bus cfg copy, flags, lock, max transfer size
    spi_dma_ctx_t *dma_ctx;             // DMA channels/descriptors; NULL when DMA disabled
#if SOC_GDMA_SUPPORTED
    gdma_channel_handle_t tx_channel;   // NOTE(review): not referenced in this file; dma_ctx holds the GDMA handles — confirm whether these fields are still needed
    gdma_channel_handle_t rx_channel;
#endif
} spicommon_bus_context_t;
static spicommon_bus_context_t s_mainbus = SPI_MAIN_BUS_DEFAULT();
static spicommon_bus_context_t* bus_ctx[SOC_SPI_PERIPH_NUM] = {&s_mainbus};
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
/* ... */
// Runs before app_main: install the flash-sharing bus lock (g_main_spi_bus_lock)
// as the main bus lock, so SPI1 accesses are arbitrated with the flash driver.
static __attribute__((constructor)) void spi_bus_lock_init_main_bus(void)
{
    spi_bus_main_set_lock(g_main_spi_bus_lock);
}
/* ... */#endif
#if SOC_GDMA_SUPPORTED
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel
#endif
/* ... */
#else
static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;/* ... */
#endif
// True when `host` names an SPI peripheral that exists on this target.
// The upper bound depends on how many SPI peripherals the SoC has (2 or 3);
// no #else branch exists, so an unexpected SOC_SPI_PERIPH_NUM would fail to compile here.
static inline bool is_valid_host(spi_host_device_t host)
{
#if (SOC_SPI_PERIPH_NUM == 2)
    return host >= SPI1_HOST && host <= SPI2_HOST;
#elif (SOC_SPI_PERIPH_NUM == 3)
    return host >= SPI1_HOST && host <= SPI3_HOST;
#endif
}
// Return the interrupt source number of the given SPI host,
// taken from the per-host peripheral signal table.
int spicommon_irqsource_for_host(spi_host_device_t host)
{
    int irq_source = spi_periph_signal[host].irq;
    return irq_source;
}
// Return the DMA interrupt source number of the given SPI host,
// taken from the per-host peripheral signal table.
int spicommon_irqdma_source_for_host(spi_host_device_t host)
{
    int dma_irq_source = spi_periph_signal[host].irq_dma;
    return dma_irq_source;
}
#if !SOC_GDMA_SUPPORTED
#if SPI_LL_DMA_SHARED
// Map a legacy SPI DMA channel number to its peripheral module enable bit.
// Only channels 1 and 2 exist on targets with shared SPI DMA.
static inline periph_module_t get_dma_periph(int dma_chan)
{
    assert(dma_chan >= 1 && dma_chan <= SOC_SPI_DMA_CHAN_NUM);
    switch (dma_chan) {
    case 1:
        return PERIPH_SPI2_DMA_MODULE;
    case 2:
        return PERIPH_SPI3_DMA_MODULE;
    default:
        abort();    // unreachable given the assert above
    }
}
/* ... */#endif
// Atomically claim a legacy DMA channel and enable its clock.
// Returns true and writes the channel id to `out_actual_dma_chan` on success;
// returns false when the channel is already taken by another host.
static bool claim_dma_chan(int dma_chan, uint32_t *out_actual_dma_chan)
{
    bool ret = false;
    portENTER_CRITICAL(&spi_dma_spinlock);
    // spi_dma_chan_enabled is a bitmap of channels in use, guarded by the spinlock
    bool is_used = (BIT(dma_chan) & spi_dma_chan_enabled);
    if (!is_used) {
        spi_dma_chan_enabled |= BIT(dma_chan);
#if SPI_LL_DMA_SHARED
        // DMA module shared with the SPI peripheral: reference-counted clock enable
        PERIPH_RCC_ACQUIRE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
            if (ref_count == 0) {
                // first user: enable bus clock and reset the DMA registers
                spi_dma_ll_enable_bus_clock(dma_chan, true);
                spi_dma_ll_reset_register(dma_chan);
            }
        }
#else
        SPI_COMMON_RCC_CLOCK_ATOMIC() {
            spi_dma_ll_enable_bus_clock(dma_chan, true);
            spi_dma_ll_reset_register(dma_chan);
        }
#endif
        *out_actual_dma_chan = dma_chan;
        ret = true;
    }
    portEXIT_CRITICAL(&spi_dma_spinlock);
    return ret;
}
// Route a DMA channel to an SPI host.
// On ESP32 the mapping is programmable via the DPORT channel-select register
// (2 bits per host); on ESP32-S2 each SPI host has a fixed DMA channel, so
// nothing needs to be done.
static void connect_spi_and_dma(spi_host_device_t host, int dma_chan)
{
#if CONFIG_IDF_TARGET_ESP32
    DPORT_SET_PERI_REG_BITS(DPORT_SPI_DMA_CHAN_SEL_REG, 3, dma_chan, (host * 2));
#elif CONFIG_IDF_TARGET_ESP32S2
#endif
}
// Legacy (non-GDMA) DMA channel allocation for `host_id`.
// SPI_DMA_CH_AUTO scans for a free channel on ESP32; on ESP32-S2 the channel
// is fixed to the host. TX and RX share the same channel id on these targets.
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{
    assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32
    assert(dma_chan > SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO);
#elif CONFIG_IDF_TARGET_ESP32S2
    assert(dma_chan == (int)host_id || dma_chan == SPI_DMA_CH_AUTO);
#endif

    esp_err_t ret = ESP_OK;
    bool success = false;
    uint32_t actual_dma_chan = 0;

    if (dma_chan == SPI_DMA_CH_AUTO) {
#if CONFIG_IDF_TARGET_ESP32
        // try each channel (1..SOC_SPI_DMA_CHAN_NUM) until one is free
        for (int i = 1; i < SOC_SPI_DMA_CHAN_NUM + 1; i++) {
            success = claim_dma_chan(i, &actual_dma_chan);
            if (success) {
                break;
            }
        }
#elif CONFIG_IDF_TARGET_ESP32S2
        // on ESP32-S2, channel number equals host number
        success = claim_dma_chan(host_id, &actual_dma_chan);
#endif
    } else {
        success = claim_dma_chan((int)dma_chan, &actual_dma_chan);
    }

    // fill the context even on failure; caller frees the whole ctx when we error out
    dma_ctx->tx_dma_chan.chan_id = actual_dma_chan;
    dma_ctx->rx_dma_chan.chan_id = actual_dma_chan;
    dma_ctx->tx_dma_chan.host_id = host_id;
    dma_ctx->rx_dma_chan.host_id = host_id;
    dma_ctx->tx_dma_chan.dir = DMA_CHANNEL_DIRECTION_TX;
    dma_ctx->rx_dma_chan.dir = DMA_CHANNEL_DIRECTION_RX;

    if (!success) {
        SPI_CHECK(false, "no available dma channel", ESP_ERR_NOT_FOUND);
    }
    connect_spi_and_dma(host_id, actual_dma_chan);
    // burst mode for both descriptor fetch and data transfer
    spi_dma_enable_burst(dma_ctx->tx_dma_chan, true, true);
    spi_dma_enable_burst(dma_ctx->rx_dma_chan, true, true);

    return ret;
}
/* ... */#else
// GDMA-based channel allocation: request a sibling TX/RX pair from the GDMA
// driver, connect both to the SPI trigger of `host_id`, and configure transfer
// parameters. Only SPI_DMA_CH_AUTO is supported on GDMA targets.
// NOTE(review): if the RX allocation or a later step fails, the already
// allocated TX channel is not released here — verify the caller's cleanup path.
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{
    assert(is_valid_host(host_id));
    assert(dma_chan == SPI_DMA_CH_AUTO);
    esp_err_t ret = ESP_OK;

    if (dma_chan == SPI_DMA_CH_AUTO) {
        // reserve_sibling makes the driver keep a paired channel for RX
        gdma_channel_alloc_config_t tx_alloc_config = {
            .flags.reserve_sibling = 1,
            .direction = GDMA_CHANNEL_DIRECTION_TX,
        };
        ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&tx_alloc_config, &dma_ctx->tx_dma_chan), SPI_TAG, "alloc gdma tx failed");

        gdma_channel_alloc_config_t rx_alloc_config = {
            .direction = GDMA_CHANNEL_DIRECTION_RX,
            .sibling_chan = dma_ctx->tx_dma_chan,
        };
        ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&rx_alloc_config, &dma_ctx->rx_dma_chan), SPI_TAG, "alloc gdma rx failed");

        if (host_id == SPI2_HOST) {
            gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
            gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
        }
#if (SOC_SPI_PERIPH_NUM >= 3)
        else if (host_id == SPI3_HOST) {
            gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
            gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
        }
#endif
        // internal memory only, 16-byte max burst
        gdma_transfer_config_t trans_cfg = {
            .max_data_burst_size = 16,
            .access_ext_mem = false,
        };
        ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->tx_dma_chan, &trans_cfg), SPI_TAG, "config gdma tx transfer failed");
        ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->rx_dma_chan, &trans_cfg), SPI_TAG, "config gdma rx transfer failed");
    }
    return ret;
}
/* ... */#endif
// Allocate a DMA context for `host_id` and claim the DMA channel(s) described
// by `dma_chan`. On success *out_dma_ctx owns the context; the caller releases
// it with spicommon_dma_chan_free().
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx)
{
    assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32
    assert(dma_chan > SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO);
#elif CONFIG_IDF_TARGET_ESP32S2
    assert(dma_chan == (int)host_id || dma_chan == SPI_DMA_CH_AUTO);
#endif

    spi_dma_ctx_t *dma_ctx = (spi_dma_ctx_t *)calloc(1, sizeof(spi_dma_ctx_t));
    if (dma_ctx == NULL) {
        return ESP_ERR_NO_MEM;
    }

    esp_err_t ret = alloc_dma_chan(host_id, dma_chan, dma_ctx);
    if (ret != ESP_OK) {
        free(dma_ctx);
        return ret;
    }

    *out_dma_ctx = dma_ctx;
    return ret;
}
// Allocate the TX and RX DMA descriptor arrays sized for `cfg_max_sz` bytes.
// Writes the actually supported maximum transfer size to *actual_max_sz.
// Returns ESP_ERR_NO_MEM (with both pointers left NULL) when either
// allocation fails.
esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz)
{
    // number of descriptors needed to cover cfg_max_sz, rounding up
    int dma_desc_ct = (cfg_max_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
    if (dma_desc_ct == 0) {
        dma_desc_ct = 1;    // at least one descriptor
    }

    // descriptors must live in internal DMA-capable memory, cache-line aligned
    dma_ctx->dmadesc_tx = heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, 1, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
    dma_ctx->dmadesc_rx = heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, 1, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
    if (dma_ctx->dmadesc_tx == NULL || dma_ctx->dmadesc_rx == NULL) {
        // free(NULL) is a no-op, so no per-pointer guard is needed
        free(dma_ctx->dmadesc_tx);
        dma_ctx->dmadesc_tx = NULL;
        free(dma_ctx->dmadesc_rx);
        dma_ctx->dmadesc_rx = NULL;
        return ESP_ERR_NO_MEM;
    }

    dma_ctx->dma_desc_num = dma_desc_ct;
    *actual_max_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
    return ESP_OK;
}
// Build a linked DMA descriptor chain over `data` of `len` bytes.
// Each descriptor covers at most DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED
// bytes; the last descriptor gets suc_eof set and a NULL next pointer.
// Runs from IRAM so it can be called from ISR context.
// NOTE(review): len must be > 0 — with len == 0 the final writes index
// dmadesc[-1]; confirm all callers guarantee a non-empty transfer.
void IRAM_ATTR spicommon_dma_desc_setup_link(spi_dma_desc_t *dmadesc, const void *data, int len, bool is_rx)
{
    // convert from DMA address space to a CPU-visible pointer before writing
    dmadesc = ADDR_DMA_2_CPU(dmadesc);
    int n = 0;
    while (len) {
        int dmachunklen = len;
        if (dmachunklen > DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
            dmachunklen = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
        }
        if (is_rx) {
            // RX buffer size must be rounded up to a word boundary
            dmadesc[n].dw0.size = (dmachunklen + 3) & (~3);
        } else {
            dmadesc[n].dw0.size = dmachunklen;
            dmadesc[n].dw0.length = dmachunklen;
        }
        dmadesc[n].buffer = (uint8_t *)data;
        dmadesc[n].dw0.suc_eof = 0;
        // hand ownership of the descriptor to the DMA engine
        dmadesc[n].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        dmadesc[n].next = ADDR_CPU_2_DMA(&dmadesc[n + 1]);
        len -= dmachunklen;
        data += dmachunklen;
        n++;
    }
    // terminate the chain at the last descriptor written
    dmadesc[n - 1].dw0.suc_eof = 1;
    dmadesc[n - 1].next = NULL;
}
/* -------------------- alloc dma periph -------------------- */
// Release the DMA resources held in `dma_ctx` and free the context itself.
// Legacy targets: clear the channel bit and drop the clock reference.
// GDMA targets: disconnect and delete both channel handles.
esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
{
    assert(dma_ctx);
#if !SOC_GDMA_SUPPORTED
    int dma_chan = dma_ctx->tx_dma_chan.chan_id;
    assert(spi_dma_chan_enabled & BIT(dma_chan));

    portENTER_CRITICAL(&spi_dma_spinlock);
    spi_dma_chan_enabled &= ~BIT(dma_chan);
#if SPI_LL_DMA_SHARED
    PERIPH_RCC_RELEASE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
        if (ref_count == 0) {
            // NOTE(review): clock was enabled with `dma_chan` in claim_dma_chan()
            // but is disabled here with `host_id` — confirm these always match
            spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
        }
    }
#else
    SPI_COMMON_RCC_CLOCK_ATOMIC() {
        spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
    }
#endif
    portEXIT_CRITICAL(&spi_dma_spinlock);

#else
    if (dma_ctx->rx_dma_chan) {
        gdma_disconnect(dma_ctx->rx_dma_chan);
        gdma_del_channel(dma_ctx->rx_dma_chan);
    }
    if (dma_ctx->tx_dma_chan) {
        gdma_disconnect(dma_ctx->tx_dma_chan);
        gdma_del_channel(dma_ctx->tx_dma_chan);
    }
#endif

    free(dma_ctx);
    return ESP_OK;
}
/* -------------------- free dma periph -------------------- */
#if SOC_SPI_SUPPORT_OCT
// Decide whether the octal-mode pin set can use the dedicated IOMUX route.
// Only SPI2 supports octal; every configured pin (>= 0) must sit exactly on
// its IOMUX position, unused pins (-1) are ignored.
static bool check_iomux_pins_oct(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
    if (host != SPI2_HOST) {
        return false;
    }
    const int pins[] = {bus_config->data0_io_num, bus_config->data1_io_num, bus_config->data2_io_num, bus_config->data3_io_num,
                        bus_config->sclk_io_num, bus_config->data4_io_num, bus_config->data5_io_num, bus_config->data6_io_num, bus_config->data7_io_num
                       };
    const int iomux_pins[] = {SPI2_IOMUX_PIN_NUM_MOSI_OCT, SPI2_IOMUX_PIN_NUM_MISO_OCT, SPI2_IOMUX_PIN_NUM_WP_OCT, SPI2_IOMUX_PIN_NUM_HD_OCT,
                              SPI2_IOMUX_PIN_NUM_CLK_OCT, SPI2_IOMUX_PIN_NUM_IO4_OCT, SPI2_IOMUX_PIN_NUM_IO5_OCT, SPI2_IOMUX_PIN_NUM_IO6_OCT, SPI2_IOMUX_PIN_NUM_IO7_OCT
                             };
    for (size_t i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
        bool pin_configured = (pins[i] >= 0);
        if (pin_configured && pins[i] != iomux_pins[i]) {
            return false;
        }
    }
    return true;
}
/* ... */#endif
// Decide whether the quad-mode pin set can use the dedicated IOMUX route for
// `host`: each configured pin (>= 0) must match the host's IOMUX pin exactly;
// unused pins (-1) do not disqualify the bus.
static bool check_iomux_pins_quad(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
    const int pins[5] = {
        bus_config->sclk_io_num,
        bus_config->quadwp_io_num,
        bus_config->quadhd_io_num,
        bus_config->mosi_io_num,
        bus_config->miso_io_num,
    };
    const int iomux_pins[5] = {
        spi_periph_signal[host].spiclk_iomux_pin,
        spi_periph_signal[host].spiwp_iomux_pin,
        spi_periph_signal[host].spihd_iomux_pin,
        spi_periph_signal[host].spid_iomux_pin,
        spi_periph_signal[host].spiq_iomux_pin,
    };
    for (int i = 0; i < 5; i++) {
        if (pins[i] >= 0 && pins[i] != iomux_pins[i]) {
            return false;
        }
    }
    return true;
}
// Dispatch the IOMUX-compatibility check: octal buses get the octal pin map,
// everything else falls back to the quad check.
static bool bus_uses_iomux_pins(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
#if SOC_SPI_SUPPORT_OCT
    bool is_octal_bus = ((bus_config->flags & SPICOMMON_BUSFLAG_OCTAL) == SPICOMMON_BUSFLAG_OCTAL);
    if (is_octal_bus) {
        return check_iomux_pins_oct(host, bus_config);
    }
#endif
    return check_iomux_pins_quad(host, bus_config);
}
#if SOC_SPI_SUPPORT_OCT
// Route the octal-mode pins through their dedicated IOMUX functions.
// Only SPI2 supports octal mode, enforced by the assert.
static void bus_iomux_pins_set_oct(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
    assert(host == SPI2_HOST);
    int io_nums[] = {bus_config->data0_io_num, bus_config->data1_io_num, bus_config->data2_io_num, bus_config->data3_io_num,
                     bus_config->sclk_io_num, bus_config->data4_io_num, bus_config->data5_io_num, bus_config->data6_io_num, bus_config->data7_io_num
                    };
    int io_signals[] = {spi_periph_signal[host].spid_in, spi_periph_signal[host].spiq_in, spi_periph_signal[host].spiwp_in,
                        spi_periph_signal[host].spihd_in, spi_periph_signal[host].spiclk_in, spi_periph_signal[host].spid4_out,
                        spi_periph_signal[host].spid5_out, spi_periph_signal[host].spid6_out, spi_periph_signal[host].spid7_out
                       };
    for (size_t i = 0; i < sizeof(io_nums) / sizeof(io_nums[0]); i++) {
        // was `> 0`, which silently skipped GPIO 0; use `>= 0` to match the
        // "-1 means unused" convention applied everywhere else in this file
        if (io_nums[i] >= 0) {
            gpio_iomux_in(io_nums[i], io_signals[i]);
            gpio_iomux_out(io_nums[i], SPI2_FUNC_NUM_OCT, false);
        }
    }
}
/* ... */#endif
// Route the quad-mode pins through the host's IOMUX function.
// Pins set to -1 are left untouched. Order preserved: MOSI, MISO, WP, HD, SCLK.
static void bus_iomux_pins_set_quad(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
    const int pins[5] = {
        bus_config->mosi_io_num,
        bus_config->miso_io_num,
        bus_config->quadwp_io_num,
        bus_config->quadhd_io_num,
        bus_config->sclk_io_num,
    };
    const uint32_t in_signals[5] = {
        spi_periph_signal[host].spid_in,
        spi_periph_signal[host].spiq_in,
        spi_periph_signal[host].spiwp_in,
        spi_periph_signal[host].spihd_in,
        spi_periph_signal[host].spiclk_in,
    };
    for (int i = 0; i < 5; i++) {
        if (pins[i] >= 0) {
            gpio_iomux_in(pins[i], in_signals[i]);
            gpio_iomux_out(pins[i], spi_periph_signal[host].func, false);
        }
    }
}
// Apply the IOMUX pin routing appropriate for the bus mode:
// octal buses use the dedicated octal map, all others the quad map.
static void bus_iomux_pins_set(spi_host_device_t host, const spi_bus_config_t* bus_config)
{
#if SOC_SPI_SUPPORT_OCT
    if ((bus_config->flags & SPICOMMON_BUSFLAG_OCTAL) == SPICOMMON_BUSFLAG_OCTAL) {
        bus_iomux_pins_set_oct(host, bus_config);
    } else {
        bus_iomux_pins_set_quad(host, bus_config);
    }
#else
    bus_iomux_pins_set_quad(host, bus_config);
#endif
}
/* ... */
/**
 * Validate the requested bus capabilities against the pin configuration and
 * route the pins, either through IOMUX (fast path) or the GPIO matrix.
 *
 * `flags` carries the SPICOMMON_BUSFLAG_* capabilities the caller requires;
 * the capabilities actually provided by this pin set are written to *flags_o
 * (if non-NULL). Returns ESP_ERR_INVALID_ARG when a required capability
 * cannot be satisfied.
 */
esp_err_t spicommon_bus_initialize_io(spi_host_device_t host, const spi_bus_config_t *bus_config, uint32_t flags, uint32_t* flags_o)
{
#if SOC_SPI_SUPPORT_OCT
    // "blank" = all four upper data lines left at 0 (unconfigured struct)
    bool io4_7_is_blank = !bus_config->data4_io_num && !bus_config->data5_io_num && !bus_config->data6_io_num && !bus_config->data7_io_num;
    // upper lines count as enabled only when all four are explicitly valid
    bool io4_7_enabled = !io4_7_is_blank && bus_config->data4_io_num >= 0 && bus_config->data5_io_num >= 0 &&
                         bus_config->data6_io_num >= 0 && bus_config->data7_io_num >= 0;
    SPI_CHECK((flags & SPICOMMON_BUSFLAG_MASTER) || !((flags & SPICOMMON_BUSFLAG_OCTAL) == SPICOMMON_BUSFLAG_OCTAL), "Octal SPI mode / OPI mode only works when SPI is used as Master", ESP_ERR_INVALID_ARG);
    SPI_CHECK(host == SPI2_HOST || !((flags & SPICOMMON_BUSFLAG_OCTAL) == SPICOMMON_BUSFLAG_OCTAL), "Only SPI2 supports Octal SPI mode / OPI mode", ESP_ERR_INVALID_ARG);
#endif

    uint32_t temp_flag = 0;

    // which lines must be output-capable depends on master vs slave role
    bool miso_need_output;
    bool mosi_need_output;
    bool sclk_need_output;
    if ((flags & SPICOMMON_BUSFLAG_MASTER) != 0) {
        // master: MISO only drives in dual (bidirectional) mode
        miso_need_output = ((flags & SPICOMMON_BUSFLAG_DUAL) != 0) ? true : false;
        mosi_need_output = true;
        sclk_need_output = true;
    } else {
        // slave: MOSI only drives in dual mode, SCLK is always an input
        miso_need_output = true;
        mosi_need_output = ((flags & SPICOMMON_BUSFLAG_DUAL) != 0) ? true : false;
        sclk_need_output = false;
    }

    const bool wp_need_output = true;
    const bool hd_need_output = true;

    // validate each configured pin and record the capabilities this pin set provides
    if (bus_config->sclk_io_num >= 0) {
        temp_flag |= SPICOMMON_BUSFLAG_SCLK;
        SPI_CHECK_PIN(bus_config->sclk_io_num, "sclk", sclk_need_output);
    }
    if (bus_config->quadwp_io_num >= 0) {
        SPI_CHECK_PIN(bus_config->quadwp_io_num, "wp", wp_need_output);
    }
    if (bus_config->quadhd_io_num >= 0) {
        SPI_CHECK_PIN(bus_config->quadhd_io_num, "hd", hd_need_output);
    }
#if SOC_SPI_SUPPORT_OCT
    const bool io4_need_output = true;
    const bool io5_need_output = true;
    const bool io6_need_output = true;
    const bool io7_need_output = true;
    if (io4_7_enabled) {
        temp_flag |= SPICOMMON_BUSFLAG_IO4_IO7;
        if (bus_config->data4_io_num >= 0) {
            SPI_CHECK_PIN(bus_config->data4_io_num, "spi data4", io4_need_output);
        }
        if (bus_config->data5_io_num >= 0) {
            SPI_CHECK_PIN(bus_config->data5_io_num, "spi data5", io5_need_output);
        }
        if (bus_config->data6_io_num >= 0) {
            SPI_CHECK_PIN(bus_config->data6_io_num, "spi data6", io6_need_output);
        }
        if (bus_config->data7_io_num >= 0) {
            SPI_CHECK_PIN(bus_config->data7_io_num, "spi data7", io7_need_output);
        }
    }
#endif
    // WPHD capability requires both WP and HD pins
    if (bus_config->quadhd_io_num >= 0 && bus_config->quadwp_io_num >= 0) {
        temp_flag |= SPICOMMON_BUSFLAG_WPHD;
    }
    if (bus_config->mosi_io_num >= 0) {
        temp_flag |= SPICOMMON_BUSFLAG_MOSI;
        SPI_CHECK_PIN(bus_config->mosi_io_num, "mosi", mosi_need_output);
    }
    if (bus_config->miso_io_num >= 0) {
        temp_flag |= SPICOMMON_BUSFLAG_MISO;
        SPI_CHECK_PIN(bus_config->miso_io_num, "miso", miso_need_output);
    }
    // DUAL possible only when both data lines (where present) are output-capable
    if ((bus_config->mosi_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->mosi_io_num)) &&
            (bus_config->miso_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(bus_config->miso_io_num))) {
        temp_flag |= SPICOMMON_BUSFLAG_DUAL;
    }

    // IOMUX is usable only if not forced to GPIO matrix and all pins sit on IOMUX positions
    bool use_iomux = !(flags & SPICOMMON_BUSFLAG_GPIO_PINS) && bus_uses_iomux_pins(host, bus_config);
    if (use_iomux) {
        temp_flag |= SPICOMMON_BUSFLAG_IOMUX_PINS;
    } else {
        temp_flag |= SPICOMMON_BUSFLAG_GPIO_PINS;
    }

    // any requested capability not provided by this pin set is an error;
    // MASTER and SLP_ALLOW_PD are requests, not pin capabilities, so excluded
    uint32_t missing_flag = flags & ~temp_flag;
    missing_flag &= ~SPICOMMON_BUSFLAG_MASTER;
    missing_flag &= ~SPICOMMON_BUSFLAG_SLP_ALLOW_PD;

    if (missing_flag != 0) {
        if (missing_flag & SPICOMMON_BUSFLAG_SCLK) {
            ESP_LOGE(SPI_TAG, "sclk pin required.");
        }
        if (missing_flag & SPICOMMON_BUSFLAG_MOSI) {
            ESP_LOGE(SPI_TAG, "mosi pin required.");
        }
        if (missing_flag & SPICOMMON_BUSFLAG_MISO) {
            ESP_LOGE(SPI_TAG, "miso pin required.");
        }
        if (missing_flag & SPICOMMON_BUSFLAG_DUAL) {
            ESP_LOGE(SPI_TAG, "not both mosi and miso output capable");
        }
        if (missing_flag & SPICOMMON_BUSFLAG_WPHD) {
            ESP_LOGE(SPI_TAG, "both wp and hd required.");
        }
        if (missing_flag & SPICOMMON_BUSFLAG_IOMUX_PINS) {
            ESP_LOGE(SPI_TAG, "not using iomux pins");
        }
#if SOC_SPI_SUPPORT_OCT
        if (missing_flag & SPICOMMON_BUSFLAG_IO4_IO7) {
            ESP_LOGE(SPI_TAG, "spi data4 ~ spi data7 are required.");
        }
#endif
        SPI_CHECK(missing_flag == 0, "not all required capabilities satisfied.", ESP_ERR_INVALID_ARG);
    }

    if (use_iomux) {
        // fast path: connect pins directly through IOMUX
        ESP_LOGD(SPI_TAG, "SPI%d use iomux pins.", host + 1);
        bus_iomux_pins_set(host, bus_config);
    } else {
        // GPIO matrix path: connect each configured pin's in/out signals
        ESP_LOGD(SPI_TAG, "SPI%d use gpio matrix.", host + 1);
        if (bus_config->mosi_io_num >= 0) {
            if (mosi_need_output || (temp_flag & SPICOMMON_BUSFLAG_DUAL)) {
                gpio_set_direction(bus_config->mosi_io_num, GPIO_MODE_INPUT_OUTPUT);
                esp_rom_gpio_connect_out_signal(bus_config->mosi_io_num, spi_periph_signal[host].spid_out, false, false);
            } else {
                gpio_set_direction(bus_config->mosi_io_num, GPIO_MODE_INPUT);
            }
            esp_rom_gpio_connect_in_signal(bus_config->mosi_io_num, spi_periph_signal[host].spid_in, false);
#if CONFIG_IDF_TARGET_ESP32S2
            PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[bus_config->mosi_io_num]);
#endif
            gpio_func_sel(bus_config->mosi_io_num, FUNC_GPIO);
        }
        if (bus_config->miso_io_num >= 0) {
            if (miso_need_output || (temp_flag & SPICOMMON_BUSFLAG_DUAL)) {
                gpio_set_direction(bus_config->miso_io_num, GPIO_MODE_INPUT_OUTPUT);
                esp_rom_gpio_connect_out_signal(bus_config->miso_io_num, spi_periph_signal[host].spiq_out, false, false);
            } else {
                gpio_set_direction(bus_config->miso_io_num, GPIO_MODE_INPUT);
            }
            esp_rom_gpio_connect_in_signal(bus_config->miso_io_num, spi_periph_signal[host].spiq_in, false);
#if CONFIG_IDF_TARGET_ESP32S2
            PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[bus_config->miso_io_num]);
#endif
            gpio_func_sel(bus_config->miso_io_num, FUNC_GPIO);
        }
        if (bus_config->quadwp_io_num >= 0) {
            gpio_set_direction(bus_config->quadwp_io_num, GPIO_MODE_INPUT_OUTPUT);
            esp_rom_gpio_connect_out_signal(bus_config->quadwp_io_num, spi_periph_signal[host].spiwp_out, false, false);
            esp_rom_gpio_connect_in_signal(bus_config->quadwp_io_num, spi_periph_signal[host].spiwp_in, false);
#if CONFIG_IDF_TARGET_ESP32S2
            PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[bus_config->quadwp_io_num]);
#endif
            gpio_func_sel(bus_config->quadwp_io_num, FUNC_GPIO);
        }
        if (bus_config->quadhd_io_num >= 0) {
            gpio_set_direction(bus_config->quadhd_io_num, GPIO_MODE_INPUT_OUTPUT);
            esp_rom_gpio_connect_out_signal(bus_config->quadhd_io_num, spi_periph_signal[host].spihd_out, false, false);
            esp_rom_gpio_connect_in_signal(bus_config->quadhd_io_num, spi_periph_signal[host].spihd_in, false);
#if CONFIG_IDF_TARGET_ESP32S2
            PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[bus_config->quadhd_io_num]);
#endif
            gpio_func_sel(bus_config->quadhd_io_num, FUNC_GPIO);
        }
        if (bus_config->sclk_io_num >= 0) {
            if (sclk_need_output) {
                gpio_set_direction(bus_config->sclk_io_num, GPIO_MODE_INPUT_OUTPUT);
                esp_rom_gpio_connect_out_signal(bus_config->sclk_io_num, spi_periph_signal[host].spiclk_out, false, false);
            } else {
                gpio_set_direction(bus_config->sclk_io_num, GPIO_MODE_INPUT);
            }
            esp_rom_gpio_connect_in_signal(bus_config->sclk_io_num, spi_periph_signal[host].spiclk_in, false);
#if CONFIG_IDF_TARGET_ESP32S2
            PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[bus_config->sclk_io_num]);
#endif
            gpio_func_sel(bus_config->sclk_io_num, FUNC_GPIO);
        }
#if SOC_SPI_SUPPORT_OCT
        if ((flags & SPICOMMON_BUSFLAG_OCTAL) == SPICOMMON_BUSFLAG_OCTAL) {
            int io_nums[] = {bus_config->data4_io_num, bus_config->data5_io_num, bus_config->data6_io_num, bus_config->data7_io_num};
            uint8_t io_signals[4][2] = {{spi_periph_signal[host].spid4_out, spi_periph_signal[host].spid4_in},
                {spi_periph_signal[host].spid5_out, spi_periph_signal[host].spid5_in},
                {spi_periph_signal[host].spid6_out, spi_periph_signal[host].spid6_in},
                {spi_periph_signal[host].spid7_out, spi_periph_signal[host].spid7_in}
            };
            for (size_t i = 0; i < sizeof(io_nums) / sizeof(io_nums[0]); i++) {
                if (io_nums[i] >= 0) {
                    gpio_set_direction(io_nums[i], GPIO_MODE_INPUT_OUTPUT);
                    esp_rom_gpio_connect_out_signal(io_nums[i], io_signals[i][0], false, false);
                    esp_rom_gpio_connect_in_signal(io_nums[i], io_signals[i][1], false);
#if CONFIG_IDF_TARGET_ESP32S2
                    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[io_nums[i]]);
#endif
                    gpio_func_sel(io_nums[i], FUNC_GPIO);
                }
            }
        }
#endif
    }

    if (flags_o) {
        *flags_o = temp_flag;
    }
    return ESP_OK;
}
// Reset all pins described by `bus_cfg` back to GPIO defaults.
// Unused pins (-1) fail GPIO_IS_VALID_GPIO and are skipped.
esp_err_t spicommon_bus_free_io_cfg(const spi_bus_config_t *bus_cfg)
{
    int pin_array[] = {
        bus_cfg->mosi_io_num,
        bus_cfg->miso_io_num,
        bus_cfg->sclk_io_num,
        bus_cfg->quadwp_io_num,
        bus_cfg->quadhd_io_num,
    };
    // size_t avoids the signed/unsigned comparison against sizeof
    for (size_t i = 0; i < sizeof(pin_array) / sizeof(pin_array[0]); i++) {
        const int io = pin_array[i];
        if (GPIO_IS_VALID_GPIO(io)) {
            gpio_reset_pin(io);
        }
    }
    return ESP_OK;
}
// Route a CS pin for `host`. CS0 on its dedicated IOMUX pin takes the IOMUX
// fast path (unless force_gpio_matrix); every other case goes through the
// GPIO matrix. Input-only pins are still connected as CS inputs (slave use).
void spicommon_cs_initialize(spi_host_device_t host, int cs_io_num, int cs_num, int force_gpio_matrix)
{
    if (!force_gpio_matrix && cs_io_num == spi_periph_signal[host].spics0_iomux_pin && cs_num == 0) {
        // IOMUX path: direct connection, lowest latency
        gpio_iomux_in(cs_io_num, spi_periph_signal[host].spics_in);
        gpio_iomux_out(cs_io_num, spi_periph_signal[host].func, false);
    } else {
        // GPIO matrix path
        if (GPIO_IS_VALID_OUTPUT_GPIO(cs_io_num)) {
            gpio_set_direction(cs_io_num, GPIO_MODE_INPUT_OUTPUT);
            esp_rom_gpio_connect_out_signal(cs_io_num, spi_periph_signal[host].spics_out[cs_num], false, false);
        } else {
            gpio_set_direction(cs_io_num, GPIO_MODE_INPUT);
        }
        if (cs_num == 0) {
            // only CS0 has an input signal (used when acting as slave)
            esp_rom_gpio_connect_in_signal(cs_io_num, spi_periph_signal[host].spics_in, false);
        }
#if CONFIG_IDF_TARGET_ESP32S2
        PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[cs_io_num]);
#endif
        gpio_func_sel(cs_io_num, FUNC_GPIO);
    }
}
// Return a CS pin to its GPIO default state. The pin must be a valid GPIO.
void spicommon_cs_free_io(int cs_gpio_num)
{
    assert(GPIO_IS_VALID_GPIO(cs_gpio_num));
    gpio_reset_pin(cs_gpio_num);
}
// True when every data/ctrl line of `host` is currently routed through IOMUX.
// NOTE(review): CHECK_IOMUX_PIN is defined outside this view — it presumably
// performs an early `return false` when the named signal is not on its IOMUX
// pin; confirm against its definition.
bool spicommon_bus_using_iomux(spi_host_device_t host)
{
    CHECK_IOMUX_PIN(host, spid);
    CHECK_IOMUX_PIN(host, spiq);
    CHECK_IOMUX_PIN(host, spiwp);
    CHECK_IOMUX_PIN(host, spihd);
    return true;
}
// Install `lock` as the bus lock of the main (SPI1) bus context.
void spi_bus_main_set_lock(spi_bus_lock_handle_t lock)
{
    spicommon_bus_context_t *main_bus = bus_ctx[0];
    main_bus->bus_attr.lock = lock;
}
// Return the bus lock owned by the context of `host_id`.
spi_bus_lock_handle_t spi_bus_lock_get_by_id(spi_host_device_t host_id)
{
    spicommon_bus_context_t *ctx = bus_ctx[host_id];
    return ctx->bus_attr.lock;
}
#if SOC_SPI_SUPPORT_SLEEP_RETENTION && CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
// Sleep-retention "create" callback: register the register-backup entries of
// this bus. `arg` is the spicommon_bus_context_t installed at init time.
static esp_err_t s_bus_create_sleep_retention_cb(void *arg)
{
    spicommon_bus_context_t *ctx = arg;
    const int info_idx = ctx->host_id - 1;   // retention table skips SPI1
    return sleep_retention_entries_create(spi_reg_retention_info[info_idx].entry_array,
                                          spi_reg_retention_info[info_idx].array_size,
                                          REGDMA_LINK_PRI_GPSPI,
                                          spi_reg_retention_info[info_idx].module_id);
}
#endif  /* SOC_SPI_SUPPORT_SLEEP_RETENTION && CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP */
/* -------------------- IO general -------------------- */
/**
 * Initialize an SPI bus: claim the peripheral, allocate the bus context,
 * optionally set up DMA (channels + descriptors), create the bus lock,
 * register sleep retention (when supported), create the PM lock, and route
 * the IO pins. On any failure every resource acquired so far is released
 * via the `cleanup` path and the error code is returned.
 */
esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *bus_config, spi_dma_chan_t dma_chan)
{
    esp_err_t err = ESP_OK;
    spicommon_bus_context_t *ctx = NULL;
    spi_bus_attr_t *bus_attr = NULL;

    SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
    SPI_CHECK(bus_ctx[host_id] == NULL, "SPI bus already initialized.", ESP_ERR_INVALID_STATE);
    // the set of valid dma_chan values differs per target
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK(dma_chan >= SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif CONFIG_IDF_TARGET_ESP32S2
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == (int)host_id || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH | ESP_INTR_FLAG_EDGE | ESP_INTR_FLAG_INTRDISABLED)) == 0, "intr flag not allowed", ESP_ERR_INVALID_ARG);
#ifndef CONFIG_SPI_MASTER_ISR_IN_IRAM
    SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM) == 0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_MASTER_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG);
#endif
#if CONFIG_IDF_TARGET_ESP32
    SPI_CHECK((bus_config->data_io_default_level == 0), "no support changing io default level ", ESP_ERR_INVALID_ARG);
#endif

    // claim the peripheral; fails if another driver already owns this host
    bool spi_chan_claimed = spicommon_periph_claim(host_id, "spi master");
    SPI_CHECK(spi_chan_claimed, "host_id already in use", ESP_ERR_INVALID_STATE);

    ctx = (spicommon_bus_context_t *)calloc(1, sizeof(spicommon_bus_context_t));
    if (!ctx) {
        err = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    bus_ctx[host_id] = ctx;
    ctx->host_id = host_id;
    bus_attr = &ctx->bus_attr;
    bus_attr->bus_cfg = *bus_config;    // keep a copy so pins can be freed later

    if (dma_chan != SPI_DMA_DISABLED) {
        bus_attr->dma_enabled = 1;
        err = spicommon_dma_chan_alloc(host_id, dma_chan, &ctx->dma_ctx);
        if (err != ESP_OK) {
            goto cleanup;
        }
        err = spicommon_dma_desc_alloc(ctx->dma_ctx, bus_config->max_transfer_sz, &bus_attr->max_transfer_sz);
        if (err != ESP_OK) {
            goto cleanup;
        }
        // DMA buffers must respect the cache line size on L1-cached targets
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_get_alignment(MALLOC_CAP_DMA, (size_t *)&bus_attr->internal_mem_align_size);
#else
        bus_attr->internal_mem_align_size = 4;
#endif
    } else {
        // no DMA: transfers limited to the peripheral's internal buffer
        bus_attr->dma_enabled = 0;
        bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
    }

    spi_bus_lock_config_t lock_config = {
        .host_id = host_id,
        .cs_num = SOC_SPI_PERIPH_CS_NUM(host_id),
    };
    err = spi_bus_init_lock(&bus_attr->lock, &lock_config);
    if (err != ESP_OK) {
        goto cleanup;
    }

#if SOC_SPI_SUPPORT_SLEEP_RETENTION && CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
    // register a retention module so SPI registers survive light-sleep power down;
    // failures here are logged as warnings, not treated as fatal
    sleep_retention_module_init_param_t init_param = {
        .cbs = {
            .create = {
                .handle = s_bus_create_sleep_retention_cb,
                .arg = ctx,
            },
        },
        .depends = RETENTION_MODULE_BITMAP_INIT(CLOCK_SYSTEM)
    };
    _lock_acquire(&ctx->mutex);
    if (sleep_retention_module_init(spi_reg_retention_info[host_id - 1].module_id, &init_param) == ESP_OK) {
        if ((bus_attr->bus_cfg.flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) && (sleep_retention_module_allocate(spi_reg_retention_info[host_id - 1].module_id) != ESP_OK)) {
            ESP_LOGW(SPI_TAG, "alloc sleep recover failed, peripherals may hold power on");
        }
    } else {
        ESP_LOGW(SPI_TAG, "init sleep recover failed, spi may offline after sleep");
    }
    _lock_release(&ctx->mutex);
#else
    if (bus_attr->bus_cfg.flags & SPICOMMON_BUSFLAG_SLP_ALLOW_PD) {
        ESP_LOGE(SPI_TAG, "power down peripheral in sleep is not enabled or not supported on your target");
    }
#endif

#ifdef CONFIG_PM_ENABLE
    // keep APB frequency stable while transactions are in flight
    err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_master",
                             &bus_attr->pm_lock);
    if (err != ESP_OK) {
        goto cleanup;
    }
#endif

    err = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, &bus_attr->flags);
    if (err != ESP_OK) {
        goto cleanup;
    }

    return ESP_OK;

cleanup:
    // release everything acquired so far, in reverse order
    if (bus_attr) {
#ifdef CONFIG_PM_ENABLE
        esp_pm_lock_delete(bus_attr->pm_lock);
#endif
        if (bus_attr->lock) {
            spi_bus_deinit_lock(bus_attr->lock);
        }
        if (ctx->dma_ctx) {
            free(ctx->dma_ctx->dmadesc_tx);
            free(ctx->dma_ctx->dmadesc_rx);
            spicommon_dma_chan_free(ctx->dma_ctx);
            ctx->dma_ctx = NULL;
        }
    }
    spicommon_periph_free(host_id);
    free(bus_ctx[host_id]);
    bus_ctx[host_id] = NULL;
    return err;
}
// Allocate zero-initialized, DMA-capable internal memory suitable for SPI
// transfers. External (SPIRAM) memory is rejected. Returns NULL on failure.
void *spi_bus_dma_memory_alloc(spi_host_device_t host_id, size_t size, uint32_t extra_heap_caps)
{
    (void) host_id;
    ESP_RETURN_ON_FALSE((extra_heap_caps & MALLOC_CAP_SPIRAM) == 0, NULL, SPI_TAG, "external memory is not supported now");

    const size_t alignment = 16;    // DMA alignment requirement
    const uint32_t caps = extra_heap_caps | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL;
    return heap_caps_aligned_calloc(alignment, 1, size, caps);
}
// Return the attributes of an initialized bus, or NULL when the bus has not
// been initialized.
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
{
    spicommon_bus_context_t *ctx = bus_ctx[host_id];
    return ctx ? &ctx->bus_attr : NULL;
}
// Return the DMA context of an initialized bus, or NULL when the bus has not
// been initialized (the context itself may also be NULL when DMA is disabled).
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id)
{
    spicommon_bus_context_t *ctx = bus_ctx[host_id];
    return ctx ? ctx->dma_ctx : NULL;
}
/**
 * Tear down a bus created by spi_bus_initialize(): run the registered destroy
 * hook, free IO pins, drop sleep retention, delete the PM lock and bus lock,
 * release DMA resources and the peripheral, and free the context.
 * Returns ESP_ERR_INVALID_STATE when the bus was never initialized; if the
 * destroy hook fails, its error is returned and teardown is aborted.
 */
esp_err_t spi_bus_free(spi_host_device_t host_id)
{
    if (bus_ctx[host_id] == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    esp_err_t err = ESP_OK;
    spicommon_bus_context_t* ctx = bus_ctx[host_id];
    spi_bus_attr_t* bus_attr = &ctx->bus_attr;

    // let the upper driver (e.g. spi_master) clean up its devices first
    if (ctx->destroy_func) {
        err = ctx->destroy_func(ctx->destroy_arg);
        if (err != ESP_OK) {
            return err;
        }
    }
    spicommon_bus_free_io_cfg(&bus_attr->bus_cfg);

#if SOC_SPI_SUPPORT_SLEEP_RETENTION && CONFIG_PM_POWER_DOWN_PERIPHERAL_IN_LIGHT_SLEEP
    const periph_retention_module_t retention_id = spi_reg_retention_info[host_id - 1].module_id;
    _lock_acquire(&ctx->mutex);
    if (sleep_retention_is_module_created(retention_id)) {
        assert(sleep_retention_is_module_inited(retention_id));
        sleep_retention_module_free(retention_id);
    }
    if (sleep_retention_is_module_inited(retention_id)) {
        sleep_retention_module_deinit(retention_id);
    }
    _lock_release(&ctx->mutex);
    _lock_close(&ctx->mutex);
#endif

#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(bus_attr->pm_lock);
#endif
    spi_bus_deinit_lock(bus_attr->lock);

    if (ctx->dma_ctx) {
        free(ctx->dma_ctx->dmadesc_tx);
        free(ctx->dma_ctx->dmadesc_rx);
        spicommon_dma_chan_free(ctx->dma_ctx);
        ctx->dma_ctx = NULL;
    }
    spicommon_periph_free(host_id);
    free(ctx);
    bus_ctx[host_id] = NULL;
    return err;
}
// Register a hook that spi_bus_free() calls before tearing down the bus.
// Returns ESP_ERR_INVALID_STATE when the bus has not been initialized
// (previously this dereferenced a NULL context).
esp_err_t spi_bus_register_destroy_func(spi_host_device_t host_id,
                                        spi_destroy_func_t f, void *arg)
{
    if (bus_ctx[host_id] == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    bus_ctx[host_id]->destroy_func = f;
    bus_ctx[host_id]->destroy_arg = arg;
    return ESP_OK;
}
/* ... */
#if CONFIG_IDF_TARGET_ESP32
// State for the ESP32 DMA hardware workaround: a DMA reset may only be done
// while the *other* channel is idle. All fields are guarded by dmaworkaround_mux.
static volatile int dmaworkaround_channels_busy[2] = {0, 0};   // busy flag per channel (index = chan - 1)
static dmaworkaround_cb_t dmaworkaround_cb;                    // callback to run once a deferred reset completes
static void *dmaworkaround_cb_arg;
static portMUX_TYPE dmaworkaround_mux = portMUX_INITIALIZER_UNLOCKED;
static int dmaworkaround_waiting_for_chan = 0;                 // channel whose idle we are waiting for; 0 = none
// Request a reset of DMA channel `dmachan` (ESP32 workaround).
// If the other channel is busy, defer: remember `cb`/`arg` and return false —
// the reset will be performed (and cb invoked) when the other channel goes
// idle. If the other channel is free, reset immediately and return true.
// ISR-safe: uses the ISR critical-section variants.
bool IRAM_ATTR spicommon_dmaworkaround_req_reset(int dmachan, dmaworkaround_cb_t cb, void *arg)
{
    int otherchan = (dmachan == 1) ? 2 : 1;
    bool ret;
    portENTER_CRITICAL_ISR(&dmaworkaround_mux);
    if (dmaworkaround_channels_busy[otherchan - 1]) {
        // defer the reset until the other channel reports idle
        dmaworkaround_cb = cb;
        dmaworkaround_cb_arg = arg;
        dmaworkaround_waiting_for_chan = otherchan;
        ret = false;
    } else {
        SPI_COMMON_RCC_CLOCK_ATOMIC() {
            spi_dma_ll_reset_register(dmachan);
        }
        ret = true;
    }
    portEXIT_CRITICAL_ISR(&dmaworkaround_mux);
    return ret;
}
// True while a deferred DMA reset is still waiting for the other channel
// to become idle (nonzero waiting channel number).
bool IRAM_ATTR spicommon_dmaworkaround_reset_in_progress(void)
{
    bool reset_pending = (dmaworkaround_waiting_for_chan != 0);
    return reset_pending;
}
// Mark DMA channel `dmachan` idle. If a deferred reset was waiting on this
// channel, perform the reset now and invoke the stored callback.
// ISR-safe: runs inside the workaround critical section.
void IRAM_ATTR spicommon_dmaworkaround_idle(int dmachan)
{
    portENTER_CRITICAL_ISR(&dmaworkaround_mux);
    dmaworkaround_channels_busy[dmachan - 1] = 0;
    if (dmaworkaround_waiting_for_chan == dmachan) {
        SPI_COMMON_RCC_CLOCK_ATOMIC() {
            spi_dma_ll_reset_register(dmachan);
        }
        dmaworkaround_waiting_for_chan = 0;
        // notify the requester that its reset has completed
        dmaworkaround_cb(dmaworkaround_cb_arg);
    }
    portEXIT_CRITICAL_ISR(&dmaworkaround_mux);
}
// Mark DMA channel `dmachan` busy (a transfer is in flight), blocking any
// reset request from the other channel until spicommon_dmaworkaround_idle().
void IRAM_ATTR spicommon_dmaworkaround_transfer_active(int dmachan)
{
    portENTER_CRITICAL_ISR(&dmaworkaround_mux);
    dmaworkaround_channels_busy[dmachan - 1] = 1;
    portEXIT_CRITICAL_ISR(&dmaworkaround_mux);
}
#endif  /* CONFIG_IDF_TARGET_ESP32 */
/* -------------------- master bus init -------------------- */