 
Outline
#include <string.h>
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/task.h"
#include "freertos/idf_additions.h"
#include "sdkconfig.h"
#define LOG_LOCAL_LEVEL
#include "esp_log.h"
#include "soc/i2s_periph.h"
#include "soc/soc_caps.h"
#include "hal/gpio_hal.h"
#include "hal/i2s_hal.h"
#include "hal/dma_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/adc_ll.h"
#include "hal/clk_tree_ll.h"
#include "clk_ctrl_os.h"
#include "esp_private/i2s_platform.h"
#include "esp_private/esp_clk.h"
#include "esp_private/sleep_retention.h"
#include "driver/gpio.h"
#include "esp_private/gpio.h"
#include "driver/i2s_common.h"
#include "i2s_private.h"
#include "esp_clock_output.h"
#include "clk_ctrl_os.h"
#include "esp_clk_tree.h"
#include "esp_intr_alloc.h"
#include "esp_check.h"
#include "esp_attr.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h"
#include "esp_memory_utils.h"
#define I2S_DMA_BUFFER_MAX_SIZE
TAG
i2s_dma_calloc(i2s_chan_handle_t, size_t, size_t)
i2s_tx_channel_start(i2s_chan_handle_t)
i2s_rx_channel_start(i2s_chan_handle_t)
i2s_tx_channel_stop(i2s_chan_handle_t)
i2s_rx_channel_stop(i2s_chan_handle_t)
i2s_destroy_controller_obj(i2s_controller_t **)
i2s_acquire_controller_obj(int)
i2s_take_available_channel(i2s_controller_t *, uint8_t)
i2s_register_channel(i2s_controller_t *, i2s_dir_t, uint32_t)
i2s_channel_register_event_callback(i2s_chan_handle_t, const i2s_event_callbacks_t *, void *)
i2s_get_buf_size(i2s_chan_handle_t, uint32_t, uint32_t)
i2s_free_dma_desc(i2s_chan_handle_t)
i2s_alloc_dma_desc(i2s_chan_handle_t, uint32_t, uint32_t)
i2s_set_get_apll_freq(uint32_t)
i2s_get_source_clk_freq(i2s_clock_src_t, uint32_t)
i2s_dma_rx_callback(void *)
i2s_dma_tx_callback(void *)
i2s_init_dma_intr(i2s_chan_handle_t, int)
s_i2s_get_pair_chan_gpio_mask(i2s_chan_handle_t)
i2s_output_gpio_reserve(i2s_chan_handle_t, int)
i2s_output_gpio_revoke(i2s_chan_handle_t, uint64_t)
i2s_gpio_check_and_set(i2s_chan_handle_t, int, uint32_t, bool, bool)
i2s_gpio_loopback_set(i2s_chan_handle_t, int, uint32_t, uint32_t)
i2s_check_set_mclk(i2s_chan_handle_t, i2s_port_t, int, i2s_clock_src_t, bool)
i2s_new_channel(const i2s_chan_config_t *, i2s_chan_handle_t *, i2s_chan_handle_t *)
i2s_del_channel(i2s_chan_handle_t)
i2s_channel_get_info(i2s_chan_handle_t, i2s_chan_info_t *)
i2s_channel_enable(i2s_chan_handle_t)
i2s_channel_disable(i2s_chan_handle_t)
i2s_channel_preload_data(i2s_chan_handle_t, const void *, size_t, size_t *)
i2s_channel_write(i2s_chan_handle_t, const void *, size_t, size_t *, uint32_t)
i2s_channel_read(i2s_chan_handle_t, void *, size_t, size_t *, uint32_t)
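The public functions above form the channel lifecycle: allocate with i2s_new_channel(), bring the channel to READY with a mode-specific init (e.g. i2s_channel_init_std_mode() from driver/i2s_std.h, which lives outside this file), then enable, transfer, disable, and delete. A minimal TX sketch, assuming the standard-mode helpers from driver/i2s_std.h and placeholder GPIO numbers (adjust for the actual board):

#include "driver/i2s_std.h"   // standard-mode init helpers, not part of i2s_common.c
#include "driver/gpio.h"

static void i2s_tx_example(void)
{
    i2s_chan_handle_t tx_chan = NULL;
    /* Allocate a TX channel on an automatically chosen port */
    i2s_chan_config_t chan_cfg = I2S_CHANNEL_DEFAULT_CONFIG(I2S_NUM_AUTO, I2S_ROLE_MASTER);
    ESP_ERROR_CHECK(i2s_new_channel(&chan_cfg, &tx_chan, NULL));

    /* Bring the channel to READY state with a standard Philips configuration */
    i2s_std_config_t std_cfg = {
        .clk_cfg  = I2S_STD_CLK_DEFAULT_CONFIG(48000),
        .slot_cfg = I2S_STD_PHILIPS_SLOT_DEFAULT_CONFIG(I2S_DATA_BIT_WIDTH_16BIT, I2S_SLOT_MODE_STEREO),
        .gpio_cfg = {
            .mclk = I2S_GPIO_UNUSED,
            .bclk = GPIO_NUM_4,     // placeholder pin
            .ws   = GPIO_NUM_5,     // placeholder pin
            .dout = GPIO_NUM_18,    // placeholder pin
            .din  = I2S_GPIO_UNUSED,
        },
    };
    ESP_ERROR_CHECK(i2s_channel_init_std_mode(tx_chan, &std_cfg));

    /* Start the hardware, stream some samples, then tear down */
    ESP_ERROR_CHECK(i2s_channel_enable(tx_chan));
    int16_t samples[256] = { 0 };
    size_t bytes_written = 0;
    ESP_ERROR_CHECK(i2s_channel_write(tx_chan, samples, sizeof(samples), &bytes_written, 1000));
    ESP_ERROR_CHECK(i2s_channel_disable(tx_chan));
    ESP_ERROR_CHECK(i2s_del_channel(tx_chan));
}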
components/esp_driver_i2s/i2s_common.c
 
/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/task.h"
#include "freertos/idf_additions.h"
#include "sdkconfig.h"

#if CONFIG_I2S_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL  ESP_LOG_DEBUG
#endif
#include "esp_log.h"

#include "soc/i2s_periph.h"
#include "soc/soc_caps.h"
#include "hal/gpio_hal.h"
#include "hal/i2s_hal.h"
#include "hal/dma_types.h"
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#endif
#if SOC_I2S_SUPPORTS_ADC_DAC
#include "hal/adc_ll.h"
#endif
#if SOC_I2S_SUPPORTS_APLL
#include "hal/clk_tree_ll.h"
#include "clk_ctrl_os.h"
#endif
#include "esp_private/i2s_platform.h"
#include "esp_private/esp_clk.h"
#if SOC_I2S_SUPPORT_SLEEP_RETENTION
#include "esp_private/sleep_retention.h"
#endif
#include "driver/gpio.h"
#include "esp_private/gpio.h"
#include "driver/i2s_common.h"
#include "i2s_private.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp_clock_output.h"
#endif
#include "clk_ctrl_os.h"
#include "esp_clk_tree.h"
#include "esp_intr_alloc.h"
#include "esp_check.h"
#include "esp_attr.h"
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#include "esp_cache.h"
#endif
#include "esp_rom_gpio.h"
#include "esp_memory_utils.h"

/* The actual max size of DMA buffer is 4095
 * Reserve several bytes for alignment, so that the position of the slot data in the buffer will be relatively fixed */
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#define I2S_DMA_BUFFER_MAX_SIZE     DMA_DESCRIPTOR_BUFFER_MAX_SIZE_64B_ALIGNED
#else
#define I2S_DMA_BUFFER_MAX_SIZE     DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED
#endif  // SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE

static const char *TAG = "i2s_common";

__attribute__((always_inline))
inline void *i2s_dma_calloc(i2s_chan_handle_t handle, size_t num, size_t size)
{
    return heap_caps_aligned_calloc(4, num, size, I2S_DMA_ALLOC_CAPS);
}

/*---------------------------------------------------------------------------
                             I2S Static APIs
 ----------------------------------------------------------------------------
    Scope: This file only
 ----------------------------------------------------------------------------*/
#if I2S_USE_RETENTION_LINK
static esp_err_t s_i2s_create_sleep_retention_link_cb(void *arg)
{
    i2s_controller_t *i2s_obj = (i2s_controller_t *)arg;
    ESP_RETURN_ON_ERROR(sleep_retention_entries_create(i2s_reg_retention_info[i2s_obj->id].entry_array,
                                                       i2s_reg_retention_info[i2s_obj->id].array_size,
                                                       REGDMA_LINK_PRI_I2S, i2s_obj->slp_retention_mod),
                        TAG, "create retention link failed");
    return ESP_OK;
}

static void s_i2s_create_retention_module(i2s_controller_t *i2s_obj)
{
    sleep_retention_module_t module = i2s_obj->slp_retention_mod;
    _lock_acquire(&i2s_obj->mutex);
    if (i2s_obj->retention_link_created == false) {
        if (sleep_retention_module_allocate(module) != ESP_OK) {
            // even though the sleep retention module create failed, I2S driver should still work, so just warning here
            ESP_LOGW(TAG, "create retention module failed, power domain can't turn off");
        } else {
            i2s_obj->retention_link_created = true;
        }
    }
    _lock_release(&i2s_obj->mutex);
}
#endif  // I2S_USE_RETENTION_LINK

static void i2s_tx_channel_start(i2s_chan_handle_t handle)
{
    i2s_hal_tx_reset(&(handle->controller->hal));
#if SOC_GDMA_SUPPORTED
    gdma_reset((handle->dma.dma_chan));
#else
    i2s_hal_tx_reset_dma(&(handle->controller->hal));
#endif
    i2s_hal_tx_reset_fifo(&(handle->controller->hal));
#if SOC_GDMA_SUPPORTED
    gdma_start((handle->dma.dma_chan), (uint32_t) handle->dma.desc[0]);
#else
    esp_intr_enable(handle->dma.dma_chan);
    i2s_hal_tx_enable_intr(&(handle->controller->hal));
    i2s_hal_tx_enable_dma(&(handle->controller->hal));
    i2s_hal_tx_start_link(&(handle->controller->hal), (uint32_t) handle->dma.desc[0]);
#endif
    if (!handle->is_etm_start) {
        i2s_hal_tx_start(&(handle->controller->hal));
    }
}

static void i2s_rx_channel_start(i2s_chan_handle_t handle)
{
    i2s_hal_rx_reset(&(handle->controller->hal));
#if SOC_GDMA_SUPPORTED
    gdma_reset(handle->dma.dma_chan);
#else
    i2s_hal_rx_reset_dma(&(handle->controller->hal));
#endif
    i2s_hal_rx_reset_fifo(&(handle->controller->hal));
#if SOC_GDMA_SUPPORTED
    gdma_start(handle->dma.dma_chan, (uint32_t) handle->dma.desc[0]);
#else
    esp_intr_enable(handle->dma.dma_chan);
    i2s_hal_rx_enable_intr(&(handle->controller->hal));
    i2s_hal_rx_enable_dma(&(handle->controller->hal));
    i2s_hal_rx_start_link(&(handle->controller->hal), (uint32_t) handle->dma.desc[0]);
#endif
    if (!handle->is_etm_start) {
        i2s_hal_rx_start(&(handle->controller->hal));
    }
}

static void i2s_tx_channel_stop(i2s_chan_handle_t handle)
{
    if (!handle->is_etm_stop) {
        i2s_hal_tx_stop(&(handle->controller->hal));
    }
#if SOC_GDMA_SUPPORTED
    gdma_stop(handle->dma.dma_chan);
#else
    i2s_hal_tx_stop_link(&(handle->controller->hal));
    i2s_hal_tx_disable_intr(&(handle->controller->hal));
    i2s_hal_tx_disable_dma(&(handle->controller->hal));
    esp_intr_disable(handle->dma.dma_chan);
#endif
}

static void i2s_rx_channel_stop(i2s_chan_handle_t handle)
{
    if (!handle->is_etm_stop) {
        i2s_hal_rx_stop(&(handle->controller->hal));
    }
#if SOC_GDMA_SUPPORTED
    gdma_stop(handle->dma.dma_chan);
#else
    i2s_hal_rx_stop_link(&(handle->controller->hal));
    i2s_hal_rx_disable_intr(&(handle->controller->hal));
    i2s_hal_rx_disable_dma(&(handle->controller->hal));
    esp_intr_disable(handle->dma.dma_chan);
#endif
}

static esp_err_t i2s_destroy_controller_obj(i2s_controller_t **i2s_obj)
{
    I2S_NULL_POINTER_CHECK(TAG, i2s_obj);
    I2S_NULL_POINTER_CHECK(TAG, *i2s_obj);
    ESP_RETURN_ON_FALSE(!(*i2s_obj)->rx_chan && !(*i2s_obj)->tx_chan,
                        ESP_ERR_INVALID_STATE, TAG,
                        "there still have channels under this i2s controller");
    int id = (*i2s_obj)->id;
#if CONFIG_IDF_TARGET_ESP32
    if ((*i2s_obj)->mclk_out_hdl) {
        esp_clock_output_stop((*i2s_obj)->mclk_out_hdl);
    }
#endif
#if SOC_I2S_HW_VERSION_1
    i2s_ll_enable_dma((*i2s_obj)->hal.dev, false);
#endif
#if I2S_USE_RETENTION_LINK
    if ((*i2s_obj)->slp_retention_mod) {
        if ((*i2s_obj)->retention_link_created) {
            sleep_retention_module_free((*i2s_obj)->slp_retention_mod);
        }
        sleep_retention_module_deinit((*i2s_obj)->slp_retention_mod);
    }
#endif  // I2S_USE_RETENTION_LINK
    free(*i2s_obj);
    *i2s_obj = NULL;
    return i2s_platform_release_occupation(I2S_CTLR_HP, id);
}

/**
 * @brief Acquire i2s controller object
 *
 * @param id i2s port id
 * @param search_reverse reverse the sequence of port acquirement
 *                       set false to acquire from I2S_NUM_0 first
 *                       set true to acquire from SOC_I2S_NUM - 1 first
 * @return
 *      - pointer of acquired i2s controller object
 */
static i2s_controller_t *i2s_acquire_controller_obj(int id)
{
    if (id < 0 || id >= SOC_I2S_NUM) {
        return NULL;
    }
    /* pre-alloc controller object */
    i2s_controller_t *pre_alloc = (i2s_controller_t *)heap_caps_calloc(1, sizeof(i2s_controller_t), I2S_MEM_ALLOC_CAPS);
    if (pre_alloc == NULL) {
        return NULL;
    }
    pre_alloc->id = id;
    i2s_hal_init(&pre_alloc->hal, id);
    pre_alloc->full_duplex = false;
    pre_alloc->tx_chan = NULL;
    pre_alloc->rx_chan = NULL;
    pre_alloc->mclk = I2S_GPIO_UNUSED;

    i2s_controller_t *i2s_obj = NULL;
    /* Try to occupy this i2s controller */
    if (i2s_platform_acquire_occupation(I2S_CTLR_HP, id, "i2s_driver") == ESP_OK) {
        portENTER_CRITICAL(&g_i2s.spinlock);
        i2s_obj = pre_alloc;
        g_i2s.controller[id] = i2s_obj;
        portEXIT_CRITICAL(&g_i2s.spinlock);
#if SOC_I2S_SUPPORTS_ADC_DAC
        if (id == I2S_NUM_0) {
            adc_ll_digi_set_data_source(0);
        }
#endif
#if I2S_USE_RETENTION_LINK
        sleep_retention_module_t module = i2s_reg_retention_info[id].retention_module;
        sleep_retention_module_init_param_t init_param = {
            .cbs = {
                .create = {
                    .handle = s_i2s_create_sleep_retention_link_cb,
                    .arg = i2s_obj,
                },
            },
            .depends = RETENTION_MODULE_BITMAP_INIT(CLOCK_SYSTEM)
        };
        if (sleep_retention_module_init(module, &init_param) == ESP_OK) {
            i2s_obj->slp_retention_mod = module;
        } else {
            // even the sleep retention module init failed, I2S driver should still work, so just warning here
            ESP_LOGW(TAG, "init sleep retention failed for I2S%d, power domain may be turned off during sleep", id);
        }
#endif  // I2S_USE_RETENTION_LINK
    } else {
        free(pre_alloc);
        portENTER_CRITICAL(&g_i2s.spinlock);
        if (g_i2s.controller[id]) {
            i2s_obj = g_i2s.controller[id];
        }
        portEXIT_CRITICAL(&g_i2s.spinlock);
        if (i2s_obj == NULL) {
            ESP_LOGE(TAG, "i2s%d might be occupied by other component", id);
        }
    }
    return i2s_obj;
}

static inline bool i2s_take_available_channel(i2s_controller_t *i2s_obj, uint8_t chan_search_mask)
{
    bool is_available = false;
#if SOC_I2S_HW_VERSION_1
    /* In ESP32 and ESP32-S2, tx channel and rx channel are not totally separated
     * Take both two channels in case one channel can affect another */
    chan_search_mask = I2S_DIR_RX | I2S_DIR_TX;
#endif
    portENTER_CRITICAL(&g_i2s.spinlock);
    if (!(chan_search_mask & i2s_obj->chan_occupancy)) {
        i2s_obj->chan_occupancy |= chan_search_mask;
        is_available = true;
    }
    portEXIT_CRITICAL(&g_i2s.spinlock);
    return is_available;
}
static esp_err_t i2s_register_channel(i2s_controller_t *i2s_obj, i2s_dir_t dir, uint32_t desc_num)
{
    I2S_NULL_POINTER_CHECK(TAG, i2s_obj);

    esp_err_t ret = ESP_OK;

    i2s_chan_handle_t new_chan = (i2s_chan_handle_t)heap_caps_calloc(1, sizeof(struct i2s_channel_obj_t), I2S_MEM_ALLOC_CAPS);
    ESP_RETURN_ON_FALSE(new_chan, ESP_ERR_NO_MEM, TAG, "No memory for new channel");
    new_chan->mode = I2S_COMM_MODE_NONE;
    new_chan->role = I2S_ROLE_MASTER; // Set default role to master
    new_chan->dir = dir;
    new_chan->state = I2S_CHAN_STATE_REGISTER;
#if SOC_I2S_SUPPORTS_APLL
    new_chan->apll_en = false;
#endif
    new_chan->mode_info = NULL;
    new_chan->controller = i2s_obj;
#if CONFIG_PM_ENABLE
    new_chan->pm_lock = NULL; // Init in i2s_set_clock according to clock source
#endif
    new_chan->msg_queue = xQueueCreateWithCaps(desc_num - 1, sizeof(uint8_t *), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->msg_queue, ESP_ERR_NO_MEM, err, TAG, "No memory for message queue");
    new_chan->mutex = xSemaphoreCreateMutexWithCaps(I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->mutex, ESP_ERR_NO_MEM, err, TAG, "No memory for mutex semaphore");
    new_chan->binary = xSemaphoreCreateBinaryWithCaps(I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->binary, ESP_ERR_NO_MEM, err, TAG, "No memory for binary semaphore");

    new_chan->callbacks.on_recv = NULL;
    new_chan->callbacks.on_recv_q_ovf = NULL;
    new_chan->callbacks.on_sent = NULL;
    new_chan->callbacks.on_send_q_ovf = NULL;
    new_chan->dma.rw_pos = 0;
    new_chan->dma.curr_ptr = NULL;
    new_chan->dma.curr_desc = NULL;
    new_chan->start = NULL;
    new_chan->stop = NULL;
    new_chan->reserve_gpio_mask = 0;

    if (dir == I2S_DIR_TX) {
        if (i2s_obj->tx_chan) {
            i2s_del_channel(i2s_obj->tx_chan);
        }
        i2s_obj->tx_chan = new_chan;
    } else {
        if (i2s_obj->rx_chan) {
            i2s_del_channel(i2s_obj->rx_chan);
        }
        i2s_obj->rx_chan = new_chan;
    }
    return ret;
err:
    if (new_chan->msg_queue) {
        vQueueDeleteWithCaps(new_chan->msg_queue);
    }
    if (new_chan->mutex) {
        vSemaphoreDeleteWithCaps(new_chan->mutex);
    }
    if (new_chan->binary) {
        vSemaphoreDeleteWithCaps(new_chan->binary);
    }
    free(new_chan);
    return ret;
}

#ifndef __cplusplus
/* To make sure the i2s_event_callbacks_t is same size as i2s_event_callbacks_internal_t */
_Static_assert(sizeof(i2s_event_callbacks_t) == sizeof(i2s_event_callbacks_internal_t), "Invalid size of i2s_event_callbacks_t structure");
#endif

esp_err_t i2s_channel_register_event_callback(i2s_chan_handle_t handle, const i2s_event_callbacks_t *callbacks, void *user_data)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    I2S_NULL_POINTER_CHECK(TAG, callbacks);
    esp_err_t ret = ESP_OK;
#if CONFIG_I2S_ISR_IRAM_SAFE
    if (callbacks->on_recv) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_recv), ESP_ERR_INVALID_ARG, TAG, "on_recv callback not in IRAM");
    }
    if (callbacks->on_recv_q_ovf) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_recv_q_ovf), ESP_ERR_INVALID_ARG, TAG, "on_recv_q_ovf callback not in IRAM");
    }
    if (callbacks->on_sent) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_sent), ESP_ERR_INVALID_ARG, TAG, "on_sent callback not in IRAM");
    }
    if (callbacks->on_send_q_ovf) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_send_q_ovf), ESP_ERR_INVALID_ARG, TAG, "on_send_q_ovf callback not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
    }
#endif
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state < I2S_CHAN_STATE_RUNNING, ESP_ERR_INVALID_STATE, err, TAG, "invalid state, I2S has enabled");
    memcpy(&(handle->callbacks), callbacks, sizeof(i2s_event_callbacks_t));
    handle->user_data = user_data;
err:
    xSemaphoreGive(handle->mutex);
    return ret;
}

uint32_t i2s_get_buf_size(i2s_chan_handle_t handle, uint32_t data_bit_width, uint32_t dma_frame_num)
{
    uint32_t active_chan = handle->active_slot;
#if CONFIG_IDF_TARGET_ESP32
    uint32_t bytes_per_sample = ((data_bit_width + 15) / 16) * 2;
#else
    uint32_t bytes_per_sample = (data_bit_width + 7) / 8;
#endif  // CONFIG_IDF_TARGET_ESP32
    uint32_t bytes_per_frame = bytes_per_sample * active_chan;
    uint32_t bufsize = dma_frame_num * bytes_per_frame;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    /* bufsize need to align with cache line size */
    uint32_t alignment = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
    uint32_t aligned_frame_num = dma_frame_num;
    /* To make the buffer aligned with the cache line size, search for the ceil aligned size first,
       If the buffer size exceed the max DMA buffer size, toggle the sign to search for the floor aligned size */
    for (int sign = 1; bufsize % alignment != 0; aligned_frame_num += sign) {
        bufsize = aligned_frame_num * bytes_per_frame;
        /* If the buffer size exceed the max dma size */
        if (bufsize > I2S_DMA_BUFFER_MAX_SIZE && sign == 1) {
            sign = -1;                                      // toggle the search sign
            aligned_frame_num = dma_frame_num;              // Reset the frame num
            bufsize = aligned_frame_num * bytes_per_frame;  // Reset the bufsize
        }
    }
    if (bufsize / bytes_per_frame != dma_frame_num) {
        ESP_LOGW(TAG, "dma frame num is adjusted to %"PRIu32" to align the dma buffer with %"PRIu32
                 ", bufsize = %"PRIu32, bufsize / bytes_per_frame, alignment, bufsize);
    }
#endif
    /* Limit DMA buffer size if it is out of range */
    if (bufsize > I2S_DMA_BUFFER_MAX_SIZE) {
        uint32_t frame_num = I2S_DMA_BUFFER_MAX_SIZE / bytes_per_frame;
        bufsize = frame_num * bytes_per_frame;
        ESP_LOGW(TAG, "dma frame num is out of dma buffer size, limited to %"PRIu32, frame_num);
    }
    return bufsize;
}

esp_err_t i2s_free_dma_desc(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    if (!handle->dma.desc) {
        return ESP_OK;
    }
    for (int i = 0; i < handle->dma.desc_num; i++) {
        if (handle->dma.bufs[i]) {
            free(handle->dma.bufs[i]);
            handle->dma.bufs[i] = NULL;
        }
        if (handle->dma.desc[i]) {
            free(handle->dma.desc[i]);
            handle->dma.desc[i] = NULL;
        }
    }
    if (handle->dma.bufs) {
        free(handle->dma.bufs);
        handle->dma.bufs = NULL;
    }
    if (handle->dma.desc) {
        free(handle->dma.desc);
        handle->dma.desc = NULL;
    }
    return ESP_OK;
}
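/* Illustrative note, not part of the upstream source: for the buffer-size
 * math in i2s_get_buf_size() above, a 16-bit stereo stream with
 * dma_frame_num = 240 gives
 *     bytes_per_sample = (16 + 7) / 8 = 2      (non-ESP32 targets)
 *     bytes_per_frame  = 2 * 2        = 4
 *     bufsize          = 240 * 4      = 960 bytes
 * On ESP32 samples are padded to 16-bit boundaries, so a 24-bit sample
 * occupies ((24 + 15) / 16) * 2 = 4 bytes instead of 3. */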
esp_err_t i2s_alloc_dma_desc(i2s_chan_handle_t handle, uint32_t num, uint32_t bufsize)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    esp_err_t ret = ESP_OK;
    ESP_RETURN_ON_FALSE(bufsize <= I2S_DMA_BUFFER_MAX_SIZE, ESP_ERR_INVALID_ARG, TAG, "dma buffer can't be bigger than %d", I2S_DMA_BUFFER_MAX_SIZE);
    handle->dma.desc_num = num;
    handle->dma.buf_size = bufsize;

    /* Descriptors must be in the internal RAM */
    handle->dma.desc = (lldesc_t **)heap_caps_calloc(num, sizeof(lldesc_t *), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(handle->dma.desc, ESP_ERR_NO_MEM, err, TAG, "create I2S DMA descriptor array failed");
    handle->dma.bufs = (uint8_t **)heap_caps_calloc(num, sizeof(uint8_t *), I2S_MEM_ALLOC_CAPS);
    for (int i = 0; i < num; i++) {
        /* Allocate DMA descriptor */
        handle->dma.desc[i] = (lldesc_t *) i2s_dma_calloc(handle, 1, sizeof(lldesc_t));
        ESP_GOTO_ON_FALSE(handle->dma.desc[i], ESP_ERR_NO_MEM, err, TAG, "allocate DMA description failed");
        handle->dma.desc[i]->owner = 1;
        handle->dma.desc[i]->eof = 1;
        handle->dma.desc[i]->sosf = 0;
        handle->dma.desc[i]->length = bufsize;
        handle->dma.desc[i]->size = bufsize;
        handle->dma.desc[i]->offset = 0;
        handle->dma.bufs[i] = (uint8_t *) i2s_dma_calloc(handle, 1, bufsize * sizeof(uint8_t));
        ESP_GOTO_ON_FALSE(handle->dma.bufs[i], ESP_ERR_NO_MEM, err, TAG, "allocate DMA buffer failed");
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(handle->dma.bufs[i], bufsize * sizeof(uint8_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
#endif
        handle->dma.desc[i]->buf = handle->dma.bufs[i];
        ESP_LOGV(TAG, "desc addr: %8p\tbuffer addr:%8p", handle->dma.desc[i], handle->dma.bufs[i]);
    }
    /* Connect DMA descriptor as a circle */
    for (int i = 0; i < num; i++) {
        /* Link to the next descriptor */
        STAILQ_NEXT(handle->dma.desc[i], qe) = (i < (num - 1)) ? (handle->dma.desc[i + 1]) : handle->dma.desc[0];
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(handle->dma.desc[i], sizeof(lldesc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED);
#endif
    }
    if (handle->dir == I2S_DIR_RX) {
        i2s_ll_rx_set_eof_num(handle->controller->hal.dev, bufsize);
    }
    ESP_LOGD(TAG, "DMA malloc info: dma_desc_num = %"PRIu32", dma_desc_buf_size = dma_frame_num * slot_num * data_bit_width = %"PRIu32, num, bufsize);
    return ESP_OK;
err:
    i2s_free_dma_desc(handle);
    return ret;
}

#if SOC_I2S_SUPPORTS_APLL
static uint32_t i2s_set_get_apll_freq(uint32_t mclk_freq_hz)
{
    /* Calculate the expected APLL */
    int mclk_div = (int)((CLK_LL_APLL_MIN_HZ / mclk_freq_hz) + 1);
    /* apll_freq = mclk * div
     * when div = 1, hardware will still divide 2
     * when div = 0, the final mclk will be unpredictable
     * So the div here should be at least 2 */
    mclk_div = mclk_div < 2 ? 2 : mclk_div;
    uint32_t expt_freq = mclk_freq_hz * mclk_div;
    if (expt_freq > CLK_LL_APLL_MAX_HZ) {
        ESP_LOGE(TAG, "The required APLL frequency exceed its maximum value");
        return 0;
    }
    uint32_t real_freq = 0;
    esp_err_t ret = periph_rtc_apll_freq_set(expt_freq, &real_freq);
    if (ret == ESP_ERR_INVALID_ARG) {
        ESP_LOGE(TAG, "set APLL freq failed due to invalid argument");
        return 0;
    }
    if (ret == ESP_ERR_INVALID_STATE) {
        ESP_LOGW(TAG, "APLL is occupied already, it is working at %"PRIu32" Hz while the expected frequency is %"PRIu32" Hz", real_freq, expt_freq);
        ESP_LOGW(TAG, "Trying to work at %"PRIu32" Hz...", real_freq);
    }
    ESP_LOGD(TAG, "APLL expected frequency is %"PRIu32" Hz, real frequency is %"PRIu32" Hz", expt_freq, real_freq);
    return real_freq;
}
#endif

uint32_t i2s_get_source_clk_freq(i2s_clock_src_t clk_src, uint32_t mclk_freq_hz)
{
    uint32_t clk_freq = 0;
#if SOC_I2S_SUPPORTS_APLL
    if (clk_src == I2S_CLK_SRC_APLL) {
        return i2s_set_get_apll_freq(mclk_freq_hz);
    }
#endif
    esp_clk_tree_src_get_freq_hz(clk_src, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &clk_freq);
    return clk_freq;
}

/* Temporary ignore the deprecated warning of i2s_event_data_t::data */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#if SOC_GDMA_SUPPORTED
static bool IRAM_ATTR i2s_dma_rx_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    i2s_chan_handle_t handle = (i2s_chan_handle_t)user_data;
    BaseType_t need_yield1 = 0;
    BaseType_t need_yield2 = 0;
    BaseType_t user_need_yield = 0;
    lldesc_t *finish_desc;
    uint32_t dummy;

    finish_desc = (lldesc_t *)event_data->rx_eof_desc_addr;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    esp_cache_msync((void *)finish_desc->buf, handle->dma.buf_size, ESP_CACHE_MSYNC_FLAG_INVALIDATE);
#endif
    i2s_event_data_t evt = {
        .data = &(finish_desc->buf),
        .dma_buf = (void *)finish_desc->buf,
        .size = handle->dma.buf_size,
    };
    if (handle->callbacks.on_recv) {
        user_need_yield |= handle->callbacks.on_recv(handle, &evt, handle->user_data);
    }
    if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
        xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
        if (handle->callbacks.on_recv_q_ovf) {
            evt.data = NULL;
            user_need_yield |= handle->callbacks.on_recv_q_ovf(handle, &evt, handle->user_data);
        }
    }
    xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);

    return need_yield1 | need_yield2 | user_need_yield;
}

static bool IRAM_ATTR i2s_dma_tx_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    i2s_chan_handle_t handle = (i2s_chan_handle_t)user_data;
    BaseType_t need_yield1 = 0;
    BaseType_t need_yield2 = 0;
    BaseType_t user_need_yield = 0;
    lldesc_t *finish_desc;
    uint32_t dummy;

    finish_desc = (lldesc_t *)event_data->tx_eof_desc_addr;
    void *curr_buf = (void *)finish_desc->buf;
    i2s_event_data_t evt = {
        .data = &(finish_desc->buf),
        .dma_buf = curr_buf,
        .size = handle->dma.buf_size,
    };
    if (handle->dma.auto_clear_before_cb) {
        memset(curr_buf, 0, handle->dma.buf_size);
    }
    if (handle->callbacks.on_sent) {
        user_need_yield |= handle->callbacks.on_sent(handle, &evt, handle->user_data);
    }
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    /* Sync buffer after the callback in case users update the buffer in the callback */
    if (handle->dma.auto_clear_before_cb || handle->callbacks.on_sent) {
        esp_cache_msync(curr_buf, handle->dma.buf_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
    }
#endif
    if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
        xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
        if (handle->callbacks.on_send_q_ovf) {
            evt.data = NULL;
            evt.dma_buf = NULL;
            user_need_yield |= handle->callbacks.on_send_q_ovf(handle, &evt, handle->user_data);
        }
    }
    if (handle->dma.auto_clear_after_cb) {
        memset(curr_buf, 0, handle->dma.buf_size);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(curr_buf, handle->dma.buf_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
#endif
    }
    xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);

    return need_yield1 | need_yield2 | user_need_yield;
}
#else
static void IRAM_ATTR i2s_dma_rx_callback(void *arg)
{
    BaseType_t need_yield1 = 0;
    BaseType_t need_yield2 = 0;
    BaseType_t user_need_yield = 0;
    lldesc_t *finish_desc = NULL;
    i2s_event_data_t evt;
    i2s_chan_handle_t handle = (i2s_chan_handle_t)arg;
    uint32_t dummy;

    uint32_t status = i2s_hal_get_intr_status(&(handle->controller->hal));
    i2s_hal_clear_intr_status(&(handle->controller->hal), status);
    if (!status) {
        return;
    }

    if (handle && (status & I2S_LL_EVENT_RX_EOF)) {
        i2s_hal_get_in_eof_des_addr(&(handle->controller->hal), (uint32_t *)&finish_desc);
        evt.data = &(finish_desc->buf);
        evt.dma_buf = (void *)finish_desc->buf;
        evt.size = handle->dma.buf_size;
        if (handle->callbacks.on_recv) {
            user_need_yield |= handle->callbacks.on_recv(handle, &evt, handle->user_data);
        }
        if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
            xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
            if (handle->callbacks.on_recv_q_ovf) {
                evt.data = NULL;
                evt.dma_buf = NULL;
                user_need_yield |= handle->callbacks.on_recv_q_ovf(handle, &evt, handle->user_data);
            }
        }
        xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
    }

    if (need_yield1 || need_yield2 || user_need_yield) {
        portYIELD_FROM_ISR();
    }
}

static void IRAM_ATTR i2s_dma_tx_callback(void *arg)
{
    BaseType_t need_yield1 = 0;
    BaseType_t need_yield2 = 0;
    BaseType_t user_need_yield = 0;
    lldesc_t *finish_desc = NULL;
    i2s_event_data_t evt;
    i2s_chan_handle_t handle = (i2s_chan_handle_t)arg;
    uint32_t dummy;

    uint32_t status = i2s_hal_get_intr_status(&(handle->controller->hal));
    i2s_hal_clear_intr_status(&(handle->controller->hal), status);
    if (!status) {
        return;
    }

    if (handle && (status & I2S_LL_EVENT_TX_EOF)) {
        i2s_hal_get_out_eof_des_addr(&(handle->controller->hal), (uint32_t *)&finish_desc);
        void *curr_buf = (void *)finish_desc->buf;
        evt.data = &(finish_desc->buf);
        evt.dma_buf = curr_buf;
        evt.size = handle->dma.buf_size;
        // Auto clear the dma buffer before data sent
        if (handle->dma.auto_clear_before_cb) {
            memset(curr_buf, 0, handle->dma.buf_size);
        }
        if (handle->callbacks.on_sent) {
            user_need_yield |= handle->callbacks.on_sent(handle, &evt, handle->user_data);
        }
        if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
            xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
            if (handle->callbacks.on_send_q_ovf) {
                evt.data = NULL;
                user_need_yield |= handle->callbacks.on_send_q_ovf(handle, &evt, handle->user_data);
            }
        }
        // Auto clear the dma buffer after data sent
        if (handle->dma.auto_clear_after_cb) {
            memset(curr_buf, 0, handle->dma.buf_size);
        }
        xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
    }

    if (need_yield1 || need_yield2 || user_need_yield) {
        portYIELD_FROM_ISR();
    }
}
#endif
#pragma GCC diagnostic pop

/**
 * @brief   I2S DMA interrupt initialization
 * @note    I2S will use GDMA if chip supports, and the interrupt is triggered by GDMA.
 *
 * @param   handle      I2S channel handle
 * @param   intr_flag   Interrupt allocation flag
 * @return
 *      - ESP_OK                    I2S DMA interrupt initialize success
 *      - ESP_ERR_NOT_FOUND         GDMA channel not found
 *      - ESP_ERR_INVALID_ARG       Invalid arguments
 *      - ESP_ERR_INVALID_STATE     GDMA state error
 */
esp_err_t i2s_init_dma_intr(i2s_chan_handle_t handle, int intr_flag)
{
    esp_err_t ret = ESP_OK;
    i2s_port_t port_id = handle->controller->id;
    ESP_RETURN_ON_FALSE((port_id >= 0) && (port_id < SOC_I2S_NUM), ESP_ERR_INVALID_ARG, TAG, "invalid handle");
#if SOC_GDMA_SUPPORTED
    /* Set GDMA trigger module */
    gdma_trigger_t trig = {.periph = GDMA_TRIG_PERIPH_I2S};

    switch (port_id) {
#if SOC_I2S_NUM > 2
    case I2S_NUM_2:
        trig.instance_id = SOC_GDMA_TRIG_PERIPH_I2S2;
        break;
#endif
#if SOC_I2S_NUM > 1
    case I2S_NUM_1:
        trig.instance_id = SOC_GDMA_TRIG_PERIPH_I2S1;
        break;
#endif
    case I2S_NUM_0:
        trig.instance_id = SOC_GDMA_TRIG_PERIPH_I2S0;
        break;
    default:
        ESP_LOGE(TAG, "Unsupported I2S port number");
        return ESP_ERR_NOT_SUPPORTED;
    }

    /* Set GDMA config */
    gdma_channel_alloc_config_t dma_cfg = {};
    if (handle->dir == I2S_DIR_TX) {
        dma_cfg.direction = GDMA_CHANNEL_DIRECTION_TX;
        /* Register a new GDMA tx channel */
        ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_cfg, &handle->dma.dma_chan), TAG, "Register tx dma channel error");
        ESP_GOTO_ON_ERROR(gdma_connect(handle->dma.dma_chan, trig), err1, TAG, "Connect tx dma channel error");
        gdma_tx_event_callbacks_t cb = {.on_trans_eof = i2s_dma_tx_callback};
        /* Set callback function for GDMA, the interrupt is triggered by GDMA, then the GDMA ISR will call the callback function */
        ESP_GOTO_ON_ERROR(gdma_register_tx_event_callbacks(handle->dma.dma_chan, &cb, handle), err2, TAG, "Register tx callback failed");
    } else {
        dma_cfg.direction = GDMA_CHANNEL_DIRECTION_RX;
        /* Register a new GDMA rx channel */
        ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_cfg, &handle->dma.dma_chan), TAG, "Register rx dma channel error");
        ESP_GOTO_ON_ERROR(gdma_connect(handle->dma.dma_chan, trig), err1, TAG, "Connect rx dma channel error");
        gdma_rx_event_callbacks_t cb = {.on_recv_eof = i2s_dma_rx_callback};
        /* Set callback function for GDMA, the interrupt is triggered by GDMA, then the GDMA ISR will call the callback function */
        ESP_GOTO_ON_ERROR(gdma_register_rx_event_callbacks(handle->dma.dma_chan, &cb, handle), err2, TAG, "Register rx callback failed");
    }
#else
    intr_flag |= handle->intr_prio_flags;
    /* Initialize I2S module interrupt */
    if (handle->dir == I2S_DIR_TX) {
        esp_intr_alloc_intrstatus(i2s_periph_signal[port_id].irq, intr_flag,
                                  (uint32_t)i2s_ll_get_interrupt_status_reg(handle->controller->hal.dev), I2S_LL_TX_EVENT_MASK,
                                  i2s_dma_tx_callback, handle, &handle->dma.dma_chan);
    } else {
        esp_intr_alloc_intrstatus(i2s_periph_signal[port_id].irq, intr_flag,
                                  (uint32_t)i2s_ll_get_interrupt_status_reg(handle->controller->hal.dev), I2S_LL_RX_EVENT_MASK,
                                  i2s_dma_rx_callback, handle, &handle->dma.dma_chan);
    }
    /* Start DMA */
    i2s_ll_enable_dma(handle->controller->hal.dev, true);
#endif  // SOC_GDMA_SUPPORTED
    return ret;
#if SOC_GDMA_SUPPORTED
err2:
    gdma_disconnect(handle->dma.dma_chan);
err1:
    gdma_del_channel(handle->dma.dma_chan);
    handle->dma.dma_chan = NULL;
    return ret;
#endif
}

static uint64_t s_i2s_get_pair_chan_gpio_mask(i2s_chan_handle_t handle)
{
    if (handle->dir == I2S_DIR_TX) {
        return handle->controller->rx_chan ? handle->controller->rx_chan->reserve_gpio_mask : 0;
    }
    return handle->controller->tx_chan ? handle->controller->tx_chan->reserve_gpio_mask : 0;
}
void i2s_output_gpio_reserve(i2s_chan_handle_t handle, int gpio_num)
{
    bool used_by_pair_chan = false;
    /* If the gpio is used by the pair channel do not show warning for this case */
    if (handle->controller->full_duplex) {
        used_by_pair_chan = !!(s_i2s_get_pair_chan_gpio_mask(handle) & BIT64(gpio_num));
    }
    /* reserve the GPIO output path, because we don't expect another peripheral to signal to the same GPIO */
    if (!used_by_pair_chan && (esp_gpio_reserve(BIT64(gpio_num)) & BIT64(gpio_num))) {
        ESP_LOGW(TAG, "GPIO %d is not usable, maybe conflict with others", gpio_num);
    }
    handle->reserve_gpio_mask |= BIT64(gpio_num);
}

void i2s_output_gpio_revoke(i2s_chan_handle_t handle, uint64_t gpio_mask)
{
    uint64_t revoke_mask = gpio_mask;
    /* If the gpio is used by the pair channel do not show warning for this case */
    if (handle->controller->full_duplex) {
        uint64_t pair_chan_gpio_mask = s_i2s_get_pair_chan_gpio_mask(handle);
        /* Only revoke the gpio which is not used by the pair channel */
        revoke_mask = (pair_chan_gpio_mask ^ gpio_mask) & gpio_mask;
    }
    esp_gpio_revoke(revoke_mask);
    handle->reserve_gpio_mask &= ~gpio_mask;
}

void i2s_gpio_check_and_set(i2s_chan_handle_t handle, int gpio, uint32_t signal_idx, bool is_input, bool is_invert)
{
    /* Ignore the pin if pin = I2S_GPIO_UNUSED */
    if (gpio != (int)I2S_GPIO_UNUSED) {
        gpio_func_sel(gpio, PIN_FUNC_GPIO);
        if (is_input) {
            /* Set direction, for some GPIOs, the input function are not enabled as default */
            gpio_set_direction(gpio, GPIO_MODE_INPUT);
            esp_rom_gpio_connect_in_signal(gpio, signal_idx, is_invert);
        } else {
            i2s_output_gpio_reserve(handle, gpio);
            gpio_set_direction(gpio, GPIO_MODE_OUTPUT);
            esp_rom_gpio_connect_out_signal(gpio, signal_idx, is_invert, 0);
        }
    }
}

void i2s_gpio_loopback_set(i2s_chan_handle_t handle, int gpio, uint32_t out_sig_idx, uint32_t in_sig_idx)
{
    if (gpio != (int)I2S_GPIO_UNUSED) {
        i2s_output_gpio_reserve(handle, gpio);
        gpio_func_sel(gpio, PIN_FUNC_GPIO);
        gpio_set_direction(gpio, GPIO_MODE_INPUT_OUTPUT);
        esp_rom_gpio_connect_out_signal(gpio, out_sig_idx, 0, 0);
        esp_rom_gpio_connect_in_signal(gpio, in_sig_idx, 0);
    }
}

esp_err_t i2s_check_set_mclk(i2s_chan_handle_t handle, i2s_port_t id, int gpio_num, i2s_clock_src_t clk_src, bool is_invert)
{
    if (gpio_num == (int)I2S_GPIO_UNUSED) {
        return ESP_OK;
    }
#if CONFIG_IDF_TARGET_ESP32
    bool is_i2s0 = id == I2S_NUM_0;
    bool is_apll = clk_src == I2S_CLK_SRC_APLL;
    if (g_i2s.controller[id]->mclk_out_hdl == NULL) {
        i2s_output_gpio_reserve(handle, gpio_num);
        soc_clkout_sig_id_t clkout_sig = is_apll ? CLKOUT_SIG_APLL : (is_i2s0 ? CLKOUT_SIG_I2S0 : CLKOUT_SIG_I2S1);
        ESP_RETURN_ON_ERROR(esp_clock_output_start(clkout_sig, gpio_num, &(g_i2s.controller[id]->mclk_out_hdl)), TAG, "mclk configure failed");
    }
#else
    ESP_RETURN_ON_FALSE(GPIO_IS_VALID_GPIO(gpio_num), ESP_ERR_INVALID_ARG, TAG, "mck_io_num invalid");
#if SOC_I2S_HW_VERSION_2
    if (clk_src == I2S_CLK_SRC_EXTERNAL) {
        i2s_gpio_check_and_set(handle, gpio_num, i2s_periph_signal[id].mck_in_sig, true, is_invert);
    } else
#endif  // SOC_I2S_HW_VERSION_2
    {
        i2s_gpio_check_and_set(handle, gpio_num, i2s_periph_signal[id].mck_out_sig, false, is_invert);
    }
#endif  // CONFIG_IDF_TARGET_ESP32
    ESP_LOGD(TAG, "MCLK is pinned to GPIO%d on I2S%d", gpio_num, id);
    return ESP_OK;
}
/*---------------------------------------------------------------------------
                            I2S bus Public APIs
 ----------------------------------------------------------------------------
    Scope: Public
 ----------------------------------------------------------------------------*/
esp_err_t i2s_new_channel(const i2s_chan_config_t *chan_cfg, i2s_chan_handle_t *tx_handle, i2s_chan_handle_t *rx_handle)
{
#if CONFIG_I2S_ENABLE_DEBUG_LOG
    esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
    /* Parameter validity check */
    I2S_NULL_POINTER_CHECK(TAG, chan_cfg);
    I2S_NULL_POINTER_CHECK(TAG, tx_handle || rx_handle);
    ESP_RETURN_ON_FALSE(chan_cfg->id < SOC_I2S_NUM || chan_cfg->id == I2S_NUM_AUTO, ESP_ERR_INVALID_ARG, TAG, "invalid I2S port id");
    ESP_RETURN_ON_FALSE(chan_cfg->dma_desc_num >= 2, ESP_ERR_INVALID_ARG, TAG, "there should be at least 2 DMA buffers");
    ESP_RETURN_ON_FALSE(chan_cfg->intr_priority >= 0 && chan_cfg->intr_priority <= 7, ESP_ERR_INVALID_ARG, TAG, "intr_priority should be within 0~7");
#if !SOC_I2S_SUPPORT_SLEEP_RETENTION
    ESP_RETURN_ON_FALSE(!chan_cfg->allow_pd, ESP_ERR_NOT_SUPPORTED, TAG, "register back up is not supported");
#endif

    esp_err_t ret = ESP_OK;
    i2s_controller_t *i2s_obj = NULL;
    i2s_port_t id = chan_cfg->id;
    bool channel_found = false;
    uint8_t chan_search_mask = 0;
    chan_search_mask |= tx_handle ? I2S_DIR_TX : 0;
    chan_search_mask |= rx_handle ? I2S_DIR_RX : 0;

    /* Channel will be registered to one i2s port automatically if id is I2S_NUM_AUTO
     * Otherwise, the channel will be registered to the specific port. */
    if (id == I2S_NUM_AUTO) {
        for (int i = 0; i < SOC_I2S_NUM && !channel_found; i++) {
            i2s_obj = i2s_acquire_controller_obj(i);
            if (!i2s_obj) {
                continue;
            }
            channel_found = i2s_take_available_channel(i2s_obj, chan_search_mask);
        }
        ESP_RETURN_ON_FALSE(i2s_obj, ESP_ERR_NOT_FOUND, TAG, "get i2s object failed");
    } else {
        i2s_obj = i2s_acquire_controller_obj(id);
        ESP_RETURN_ON_FALSE(i2s_obj, ESP_ERR_NOT_FOUND, TAG, "get i2s object failed");
        channel_found = i2s_take_available_channel(i2s_obj, chan_search_mask);
    }
    ESP_GOTO_ON_FALSE(channel_found, ESP_ERR_NOT_FOUND, err, TAG, "no available channel found");

    /* Register and specify the tx handle */
    if (tx_handle) {
        ESP_GOTO_ON_ERROR(i2s_register_channel(i2s_obj, I2S_DIR_TX, chan_cfg->dma_desc_num),
                          err, TAG, "register I2S tx channel failed");
        i2s_obj->tx_chan->role = chan_cfg->role;
        i2s_obj->tx_chan->intr_prio_flags = chan_cfg->intr_priority ? BIT(chan_cfg->intr_priority) : ESP_INTR_FLAG_LOWMED;
        i2s_obj->tx_chan->dma.auto_clear_after_cb = chan_cfg->auto_clear_after_cb;
        i2s_obj->tx_chan->dma.auto_clear_before_cb = chan_cfg->auto_clear_before_cb;
        i2s_obj->tx_chan->dma.desc_num = chan_cfg->dma_desc_num;
        i2s_obj->tx_chan->dma.frame_num = chan_cfg->dma_frame_num;
        i2s_obj->tx_chan->start = i2s_tx_channel_start;
        i2s_obj->tx_chan->stop = i2s_tx_channel_stop;
        *tx_handle = i2s_obj->tx_chan;
        ESP_LOGD(TAG, "tx channel is registered on I2S%d successfully", i2s_obj->id);
    }
    /* Register and specify the rx handle */
    if (rx_handle) {
        ESP_GOTO_ON_ERROR(i2s_register_channel(i2s_obj, I2S_DIR_RX, chan_cfg->dma_desc_num),
                          err, TAG, "register I2S rx channel failed");
        i2s_obj->rx_chan->role = chan_cfg->role;
        i2s_obj->rx_chan->intr_prio_flags = chan_cfg->intr_priority ? BIT(chan_cfg->intr_priority) : ESP_INTR_FLAG_LOWMED;
        i2s_obj->rx_chan->dma.desc_num = chan_cfg->dma_desc_num;
        i2s_obj->rx_chan->dma.frame_num = chan_cfg->dma_frame_num;
        i2s_obj->rx_chan->start = i2s_rx_channel_start;
        i2s_obj->rx_chan->stop = i2s_rx_channel_stop;
        *rx_handle = i2s_obj->rx_chan;
        ESP_LOGD(TAG, "rx channel is registered on I2S%d successfully", i2s_obj->id);
    }

    if ((tx_handle != NULL) && (rx_handle != NULL)) {
        i2s_obj->full_duplex = true;
    }

#if I2S_USE_RETENTION_LINK
    if (chan_cfg->allow_pd) {
        s_i2s_create_retention_module(i2s_obj);
    }
#endif

    return ESP_OK;
/* i2s_obj allocated but register channel failed */
err:
    /* if the controller object has no channel, find the corresponding global object and destroy it */
    if (i2s_obj != NULL && i2s_obj->rx_chan == NULL && i2s_obj->tx_chan == NULL) {
        for (int i = 0; i < SOC_I2S_NUM; i++) {
            if (i2s_obj == g_i2s.controller[i]) {
                i2s_destroy_controller_obj(&g_i2s.controller[i]);
                break;
            }
        }
    }
    return ret;
}

esp_err_t i2s_del_channel(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    ESP_RETURN_ON_FALSE(handle->state < I2S_CHAN_STATE_RUNNING, ESP_ERR_INVALID_STATE, TAG, "the channel can't be deleted unless it is disabled");
    i2s_controller_t *i2s_obj = handle->controller;
    int __attribute__((unused)) id = i2s_obj->id;
    i2s_dir_t __attribute__((unused)) dir = handle->dir;
    bool is_bound = true;

#if SOC_I2S_HW_VERSION_2
    I2S_CLOCK_SRC_ATOMIC() {
        if (dir == I2S_DIR_TX) {
            i2s_ll_tx_disable_clock(handle->controller->hal.dev);
        } else {
            i2s_ll_rx_disable_clock(handle->controller->hal.dev);
        }
    }
#endif
#if SOC_I2S_SUPPORTS_APLL
    if (handle->apll_en) {
        /* Must switch back to D2CLK on ESP32-S2,
         * because the clock of some registers are bound to APLL,
         * otherwise, once APLL is disabled, the registers can't be updated anymore */
        I2S_CLOCK_SRC_ATOMIC() {
            if (handle->dir == I2S_DIR_TX) {
                i2s_ll_tx_clk_set_src(handle->controller->hal.dev, I2S_CLK_SRC_DEFAULT);
            } else {
                i2s_ll_rx_clk_set_src(handle->controller->hal.dev, I2S_CLK_SRC_DEFAULT);
            }
        }
        periph_rtc_apll_release();
    }
#endif
#if CONFIG_PM_ENABLE
    if (handle->pm_lock) {
        esp_pm_lock_delete(handle->pm_lock);
    }
#endif
    if (handle->reserve_gpio_mask) {
        i2s_output_gpio_revoke(handle, handle->reserve_gpio_mask);
    }
    if (handle->mode_info) {
        free(handle->mode_info);
    }
    if (handle->dma.desc) {
        i2s_free_dma_desc(handle);
    }
    if (handle->msg_queue) {
        vQueueDeleteWithCaps(handle->msg_queue);
    }
    if (handle->mutex) {
        vSemaphoreDeleteWithCaps(handle->mutex);
    }
    if (handle->binary) {
        vSemaphoreDeleteWithCaps(handle->binary);
    }
#if SOC_I2S_HW_VERSION_1
    i2s_obj->chan_occupancy = 0;
#else
    i2s_obj->chan_occupancy &= ~(uint32_t)dir;
#endif
    if (handle->dma.dma_chan) {
#if SOC_GDMA_SUPPORTED
        gdma_disconnect(handle->dma.dma_chan);
        gdma_del_channel(handle->dma.dma_chan);
#else
        esp_intr_free(handle->dma.dma_chan);
#endif
    }
    if (handle == i2s_obj->tx_chan) {
        free(i2s_obj->tx_chan);
        i2s_obj->tx_chan = NULL;
        i2s_obj->full_duplex = false;
    } else if (handle == i2s_obj->rx_chan) {
        free(i2s_obj->rx_chan);
        i2s_obj->rx_chan = NULL;
        i2s_obj->full_duplex = false;
    } else {
        /* Indicate the delete channel is an unbound free channel */
        is_bound = false;
        free(handle);
    }

    /* If the delete channel was bound to a controller before,
       we need to destroy this controller object if there is no channel any more */
    if (is_bound) {
        if (!(i2s_obj->tx_chan) && !(i2s_obj->rx_chan)) {
            i2s_destroy_controller_obj(&g_i2s.controller[i2s_obj->id]);
        }
        ESP_LOGD(TAG, "%s channel on I2S%d deleted", dir == I2S_DIR_TX ? "tx" : "rx", id);
    }

    return ESP_OK;
}

esp_err_t i2s_channel_get_info(i2s_chan_handle_t handle, i2s_chan_info_t *chan_info)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    I2S_NULL_POINTER_CHECK(TAG, chan_info);

    /* Find whether the handle is a registered i2s handle or still available */
    for (int i = 0; i < SOC_I2S_NUM; i++) {
        if (g_i2s.controller[i] != NULL) {
            if (g_i2s.controller[i]->tx_chan == handle || g_i2s.controller[i]->rx_chan == handle) {
                goto found;
            }
        }
    }
    return ESP_ERR_NOT_FOUND;

found:
    /* Assign the handle information */
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    chan_info->id = handle->controller->id;
    chan_info->dir = handle->dir;
    chan_info->role = handle->role;
    chan_info->mode = handle->mode;
    chan_info->total_dma_buf_size = handle->state >= I2S_CHAN_STATE_READY ? handle->dma.desc_num * handle->dma.buf_size : 0;
    if (handle->controller->full_duplex) {
        if (handle->dir == I2S_DIR_TX) {
            chan_info->pair_chan = handle->controller->rx_chan;
        } else {
            chan_info->pair_chan = handle->controller->tx_chan;
        }
    } else {
        chan_info->pair_chan = NULL;
    }
    xSemaphoreGive(handle->mutex);

    return ESP_OK;
}

esp_err_t i2s_channel_enable(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);

    esp_err_t ret = ESP_OK;

    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state == I2S_CHAN_STATE_READY, ESP_ERR_INVALID_STATE, err, TAG, "the channel has already enabled or not initialized");
#if CONFIG_PM_ENABLE
    esp_pm_lock_acquire(handle->pm_lock);
#endif
    handle->start(handle);
    handle->state = I2S_CHAN_STATE_RUNNING;
    /* Reset queue */
    xQueueReset(handle->msg_queue);
    xSemaphoreGive(handle->mutex);
    /* Give the binary semaphore to enable reading / writing task */
    xSemaphoreGive(handle->binary);
    ESP_LOGD(TAG, "i2s %s channel enabled", handle->dir == I2S_DIR_TX ? "tx" : "rx");
    return ret;

err:
    xSemaphoreGive(handle->mutex);
    return ret;
}

esp_err_t i2s_channel_disable(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    esp_err_t ret = ESP_OK;
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state > I2S_CHAN_STATE_READY, ESP_ERR_INVALID_STATE, err, TAG, "the channel has not been enabled yet");
    /* Update the state to force quit the current reading/writing operation */
    handle->state = I2S_CHAN_STATE_READY;
    /* Waiting for reading/writing operation quit
     * It should be acquired before assigning the pointer to NULL,
     * otherwise may cause NULL pointer panic while reading/writing threads haven't release the lock */
    xSemaphoreTake(handle->binary, portMAX_DELAY);
    /* Reset the descriptor pointer */
    handle->dma.curr_ptr = NULL;
    handle->dma.curr_desc = NULL;
    handle->dma.rw_pos = 0;
    handle->stop(handle);
#if CONFIG_PM_ENABLE
    esp_pm_lock_release(handle->pm_lock);
#endif
    xSemaphoreGive(handle->mutex);
    ESP_LOGD(TAG, "i2s %s channel disabled", handle->dir == I2S_DIR_TX ? "tx" : "rx");
    return ret;

err:
    xSemaphoreGive(handle->mutex);
    return ret;
}
esp_err_t i2s_channel_preload_data(i2s_chan_handle_t tx_handle, const void *src, size_t size, size_t *bytes_loaded)
{
    I2S_NULL_POINTER_CHECK(TAG, tx_handle);
    ESP_RETURN_ON_FALSE(tx_handle->dir == I2S_DIR_TX, ESP_ERR_INVALID_ARG, TAG, "this channel is not tx channel");
    ESP_RETURN_ON_FALSE(tx_handle->state == I2S_CHAN_STATE_READY, ESP_ERR_INVALID_STATE, TAG, "data can only be preloaded when the channel is READY");

    uint8_t *data_ptr = (uint8_t *)src;
    size_t remain_bytes = size;
    size_t total_loaded_bytes = 0;

    xSemaphoreTake(tx_handle->mutex, portMAX_DELAY);

    /* The pre-load data will be loaded from the first descriptor */
    if (tx_handle->dma.curr_desc == NULL) {
        tx_handle->dma.curr_desc = tx_handle->dma.desc[0];
        tx_handle->dma.curr_ptr = (void *)tx_handle->dma.desc[0]->buf;
        tx_handle->dma.rw_pos = 0;
    }

    /* Loop until no bytes in source buff remain or the descriptors are full */
    while (remain_bytes) {
        size_t bytes_can_load = remain_bytes > (tx_handle->dma.buf_size - tx_handle->dma.rw_pos) ?
                                (tx_handle->dma.buf_size - tx_handle->dma.rw_pos) : remain_bytes;
        /* When all the descriptors has loaded data, no more bytes can be loaded, break directly */
        if (bytes_can_load == 0) {
            break;
        }
        /* Load the data from the last loaded position */
        memcpy((uint8_t *)(tx_handle->dma.curr_ptr + tx_handle->dma.rw_pos), data_ptr, bytes_can_load);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(tx_handle->dma.curr_ptr, tx_handle->dma.buf_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
#endif
        data_ptr += bytes_can_load;               // Move forward the data pointer
        total_loaded_bytes += bytes_can_load;     // Add to the total loaded bytes
        remain_bytes -= bytes_can_load;           // Update the remaining bytes to be loaded
        tx_handle->dma.rw_pos += bytes_can_load;  // Move forward the dma buffer position
        /* When the current position reach the end of the dma buffer */
        if (tx_handle->dma.rw_pos == tx_handle->dma.buf_size) {
            /* If the next descriptor is not the first descriptor, keep load to the first descriptor
             * otherwise all descriptor has been loaded, break directly, the dma buffer position
             * will remain at the end of the last dma buffer */
            if (STAILQ_NEXT((lldesc_t *)tx_handle->dma.curr_desc, qe) != tx_handle->dma.desc[0]) {
                tx_handle->dma.curr_desc = STAILQ_NEXT((lldesc_t *)tx_handle->dma.curr_desc, qe);
                tx_handle->dma.curr_ptr = (void *)(((lldesc_t *)tx_handle->dma.curr_desc)->buf);
                tx_handle->dma.rw_pos = 0;
            } else {
                break;
            }
        }
    }
    *bytes_loaded = total_loaded_bytes;

    xSemaphoreGive(tx_handle->mutex);
    return ESP_OK;
}
esp_err_t i2s_channel_write(i2s_chan_handle_t handle, const void *src, size_t size, size_t *bytes_written, uint32_t timeout_ms)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    ESP_RETURN_ON_FALSE(handle->dir == I2S_DIR_TX, ESP_ERR_INVALID_ARG, TAG, "this channel is not tx channel");

    esp_err_t ret = ESP_OK;
    char *data_ptr;
    char *src_byte;
    size_t bytes_can_write;
    if (bytes_written) {
        *bytes_written = 0;
    }

    /* The binary semaphore can only be taken when the channel has been enabled and no other writing operation in progress */
    ESP_RETURN_ON_FALSE(xSemaphoreTake(handle->binary, pdMS_TO_TICKS(timeout_ms)) == pdTRUE, ESP_ERR_INVALID_STATE, TAG, "The channel is not enabled");
    src_byte = (char *)src;
    while (size > 0 && handle->state == I2S_CHAN_STATE_RUNNING) {
        if (handle->dma.rw_pos == handle->dma.buf_size || handle->dma.curr_ptr == NULL) {
            if (xQueueReceive(handle->msg_queue, &(handle->dma.curr_ptr), pdMS_TO_TICKS(timeout_ms)) == pdFALSE) {
                ret = ESP_ERR_TIMEOUT;
                break;
            }
            handle->dma.rw_pos = 0;
        }
        data_ptr = (char *)handle->dma.curr_ptr;
        data_ptr += handle->dma.rw_pos;
        bytes_can_write = handle->dma.buf_size - handle->dma.rw_pos;
        if (bytes_can_write > size) {
            bytes_can_write = size;
        }
        memcpy(data_ptr, src_byte, bytes_can_write);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(handle->dma.curr_ptr, handle->dma.buf_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
#endif
        size -= bytes_can_write;
        src_byte += bytes_can_write;
        handle->dma.rw_pos += bytes_can_write;
        if (bytes_written) {
            (*bytes_written) += bytes_can_write;
        }
    }
    xSemaphoreGive(handle->binary);

    return ret;
}

esp_err_t i2s_channel_read(i2s_chan_handle_t handle, void *dest, size_t size, size_t *bytes_read, uint32_t timeout_ms)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    ESP_RETURN_ON_FALSE(handle->dir == I2S_DIR_RX, ESP_ERR_INVALID_ARG, TAG, "this channel is not rx channel");

    esp_err_t ret = ESP_OK;
    uint8_t *data_ptr;
    uint8_t *dest_byte;
    int bytes_can_read;
    if (bytes_read) {
        *bytes_read = 0;
    }
    dest_byte = (uint8_t *)dest;
    /* The binary semaphore can only be taken when the channel has been enabled and no other reading operation in progress */
    ESP_RETURN_ON_FALSE(xSemaphoreTake(handle->binary, pdMS_TO_TICKS(timeout_ms)) == pdTRUE, ESP_ERR_INVALID_STATE, TAG, "The channel is not enabled");
    while (size > 0 && handle->state == I2S_CHAN_STATE_RUNNING) {
        if (handle->dma.rw_pos == handle->dma.buf_size || handle->dma.curr_ptr == NULL) {
            if (xQueueReceive(handle->msg_queue, &(handle->dma.curr_ptr), pdMS_TO_TICKS(timeout_ms)) == pdFALSE) {
                ret = ESP_ERR_TIMEOUT;
                break;
            }
            handle->dma.rw_pos = 0;
        }
        data_ptr = (uint8_t *)handle->dma.curr_ptr;
        data_ptr += handle->dma.rw_pos;
        bytes_can_read = handle->dma.buf_size - handle->dma.rw_pos;
        if (bytes_can_read > (int)size) {
            bytes_can_read = size;
        }
        memcpy(dest_byte, data_ptr, bytes_can_read);
        size -= bytes_can_read;
        dest_byte += bytes_can_read;
        handle->dma.rw_pos += bytes_can_read;
        if (bytes_read) {
            (*bytes_read) += bytes_can_read;
        }
    }
    xSemaphoreGive(handle->binary);

    return ret;
}
#if SOC_I2S_SUPPORTS_TX_SYNC_CNT
uint32_t i2s_sync_get_bclk_count(i2s_chan_handle_t tx_handle)
{
    return i2s_ll_tx_get_bclk_sync_count(tx_handle->controller->hal.dev);
}

uint32_t i2s_sync_get_fifo_count(i2s_chan_handle_t tx_handle)
{
    return i2s_ll_tx_get_fifo_sync_count(tx_handle->controller->hal.dev);
}

void i2s_sync_reset_bclk_count(i2s_chan_handle_t tx_handle)
{
    i2s_ll_tx_reset_bclk_sync_counter(tx_handle->controller->hal.dev);
}

void i2s_sync_reset_fifo_count(i2s_chan_handle_t tx_handle)
{
    i2s_ll_tx_reset_fifo_sync_counter(tx_handle->controller->hal.dev);
}
#endif  // SOC_I2S_SUPPORTS_TX_SYNC_CNT
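A hedged sketch of the event-callback and preload APIs implemented above: callbacks run in ISR (or GDMA ISR) context and must return quickly, returning true only if a higher-priority task was woken; i2s_channel_register_event_callback() is only accepted while the channel state is below RUNNING, and i2s_channel_preload_data() only while the channel is READY, i.e. after mode initialization but before i2s_channel_enable(). The callback and buffer names below are illustrative, not part of the driver.

#include "driver/i2s_common.h"
#include "esp_attr.h"

/* Illustrative ISR-context callback: count completed DMA buffers */
static volatile uint32_t s_sent_cnt = 0;

static bool IRAM_ATTR on_sent_cb(i2s_chan_handle_t chan, i2s_event_data_t *event, void *user_ctx)
{
    s_sent_cnt++;
    return false;   // no task woken, no context switch needed
}

static void setup_tx_callbacks(i2s_chan_handle_t tx_chan)
{
    i2s_event_callbacks_t cbs = {
        .on_recv = NULL,
        .on_recv_q_ovf = NULL,
        .on_sent = on_sent_cb,
        .on_send_q_ovf = NULL,
    };
    /* Only allowed before the channel is enabled (state < RUNNING) */
    ESP_ERROR_CHECK(i2s_channel_register_event_callback(tx_chan, &cbs, NULL));

    /* Fill the DMA descriptors with the first samples so the very first
     * frames after i2s_channel_enable() are not silence */
    static int16_t first_samples[512] = { 0 };
    size_t loaded = 0;
    ESP_ERROR_CHECK(i2s_channel_preload_data(tx_chan, first_samples, sizeof(first_samples), &loaded));
}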