/* ... */
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_heap_caps.h"
#include "esp_compiler.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "esp_private/cache_utils.h"
#include "esp_private/esp_cache_esp32_private.h"
#include "esp_private/esp_mmu_map_private.h"
#include "ext_mem_layout.h"
#include "esp_mmu_map.h"23 includes
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
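/*
 * Illustrative note (not in the original source): with a hypothetical 64 KB
 * (0x10000) MMU page size, the helpers above behave as follows:
 *
 *     ALIGN_UP_BY(0x12345, 0x10000)   == 0x20000
 *     ALIGN_DOWN_BY(0x12345, 0x10000) == 0x10000
 *
 * Both assume `align` is a power of two, which holds for MMU page sizes.
 */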
#define MEM_REGION_MERGED -1
/* ... */
#define ENABLE_PADDR_CHECK !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR
static DRAM_ATTR const char *TAG = "mmap";
/* ... */
/* ... */
typedef struct mem_block_ {
    uint32_t laddr_start;
    uint32_t laddr_end;
    intptr_t vaddr_start;
    intptr_t vaddr_end;
    size_t size;
    int caps;
    uint32_t paddr_start;
    uint32_t paddr_end;
    mmu_target_t target;
    TAILQ_ENTRY(mem_block_) entries;
} mem_block_t;
/* ... */
typedef struct mem_region_ {
    cache_bus_mask_t bus_id;
    uint32_t start;
    uint32_t end;
    size_t region_size;
    uint32_t free_head;
    size_t max_slot_size;
    int caps;
    mmu_target_t targets;
    TAILQ_HEAD(mem_block_head_, mem_block_) mem_block_head;
} mem_region_t;
typedef struct {
    /* ... */
    uint32_t num_regions;
    /* ... */
    mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
#if ENABLE_PADDR_CHECK
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size); /* ... */
#endif
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static cache_bus_mask_t s_get_bus_mask(uint32_t vaddr_start, uint32_t len)
{
#if CACHE_LL_EXT_MEM_VIA_L2CACHE
    return cache_ll_l2_get_bus(0, vaddr_start, len);
#else
    return cache_ll_l1_get_bus(0, vaddr_start, len);
#endif
}
static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /* ... */
    extern char _instruction_reserved_start;
    extern char _instruction_reserved_end;
    size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
    irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
    irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_instruction_reserved_start, irom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= irom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                irom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= irom_len_to_reserve;
            }
        }
    }
}
static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /* ... */
    extern char _rodata_reserved_start;
    extern char _rodata_reserved_end;
    size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
    drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
    drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_rodata_reserved_start, drom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= drom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                drom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= drom_len_to_reserve;
            }
        }
    }
}
#endif /* ... */
#if SOC_MMU_PER_EXT_MEM_TARGET
FORCE_INLINE_ATTR uint32_t s_get_mmu_id_from_target(mmu_target_t target)
{
    return (target == MMU_TARGET_FLASH0) ? MMU_LL_FLASH_MMU_ID : MMU_LL_PSRAM_MMU_ID;
}
/* ... */
#endif
void esp_mmu_map_init(void)
{
    mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
        hw_mem_regions[i].region_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].max_slot_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
        hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
        hw_mem_regions[i].targets = g_mmu_mem_regions[i].targets;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
        assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
        assert(hw_mem_regions[i].region_size % CONFIG_MMU_PAGE_SIZE == 0);
    }

#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
    s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
    s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM); /* ... */
#endif

    if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
        for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
            mem_region_t *a = &hw_mem_regions[i - 1];
            mem_region_t *b = &hw_mem_regions[i];
            if ((b->free_head == a->end) && (b->caps == a->caps) && (b->targets == a->targets)) {
                a->caps = MEM_REGION_MERGED;
                b->bus_id |= a->bus_id;
                b->start = a->start;
                b->region_size += a->region_size;
                b->free_head = a->free_head;
                b->max_slot_size += a->max_slot_size;
            }
        }
    }

    uint32_t region_num = 0;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps != MEM_REGION_MERGED) {
            region_num++;
        }
    }
    ESP_EARLY_LOGV(TAG, "after coalescing, %" PRIu32 " regions are left", region_num);

    uint32_t available_region_idx = 0;
    s_mmu_ctx.num_regions = region_num;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
            continue;
        }
        memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mem_region_t));
        available_region_idx++;
    }

    for (int i = 0; i < available_region_idx; i++) {
        TAILQ_INIT(&s_mmu_ctx.mem_regions[i].mem_block_head);
    }
    assert(available_region_idx == region_num);
}
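/*
 * Worked example for the merge loop above (hypothetical addresses): if region
 * A covers [0x42000000, 0x42080000) and region B covers
 * [0x42080000, 0x42100000) with identical caps and targets, A is marked
 * MEM_REGION_MERGED and B is grown to [0x42000000, 0x42100000), so the
 * allocator sees one 1 MB linear region instead of two 512 KB ones.
 */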
static esp_err_t s_mem_caps_check(mmu_mem_caps_t caps)
{
    if (caps & MMU_MEM_CAP_EXEC) {
        if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
            return ESP_ERR_INVALID_ARG;
        }
        caps |= MMU_MEM_CAP_32BIT;
    }
    return ESP_OK;
}
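/*
 * Sketch of the rule enforced above (illustrative calls): executable mappings
 * must not be writable or byte-accessible.
 *
 *     s_mem_caps_check(MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ);   // ESP_OK
 *     s_mem_caps_check(MMU_MEM_CAP_EXEC | MMU_MEM_CAP_WRITE);  // ESP_ERR_INVALID_ARG
 *     s_mem_caps_check(MMU_MEM_CAP_EXEC | MMU_MEM_CAP_8BIT);   // ESP_ERR_INVALID_ARG
 */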
esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len)
{
    ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    *out_len = 0;
    size_t max = 0;
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        if (((s_mmu_ctx.mem_regions[i].caps & caps) == caps) && ((s_mmu_ctx.mem_regions[i].targets & target) == target)) {
            if (s_mmu_ctx.mem_regions[i].max_slot_size > max) {
                max = s_mmu_ctx.mem_regions[i].max_slot_size;
            }
        }
    }
    *out_len = max;
    return ESP_OK;
}
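/*
 * Usage sketch (illustrative): query the largest mappable chunk before
 * deciding how much physical memory to map in one call.
 *
 *     size_t max_len = 0;
 *     ESP_ERROR_CHECK(esp_mmu_map_get_max_consecutive_free_block_size(
 *         MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &max_len));
 */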
static int32_t s_find_available_region(mem_region_t *mem_regions, uint32_t region_nums, size_t size, mmu_mem_caps_t caps, mmu_target_t target)
{
    int32_t found_region_id = -1;
    for (int i = 0; i < region_nums; i++) {
        if (((mem_regions[i].caps & caps) == caps) && ((mem_regions[i].targets & target) == target)) {
            if (mem_regions[i].max_slot_size >= size) {
                found_region_id = i;
                break;
            }
        }
    }
    return found_region_id;
}
esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr)
{
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    uint32_t laddr = 0;
    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    laddr = (uint32_t)s_mmu_ctx.mem_regions[found_region_id].free_head;
    s_mmu_ctx.mem_regions[found_region_id].free_head += aligned_size;
    s_mmu_ctx.mem_regions[found_region_id].max_slot_size -= aligned_size;
    ESP_EARLY_LOGV(TAG, "found laddr is 0x%" PRIx32, laddr);

    uint32_t vaddr = 0;
    if (caps & MMU_MEM_CAP_EXEC) {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION, target);
    } else {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA, target);
    }
    *out_ptr = (void *)vaddr;
    return ESP_OK;
}
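/*
 * Usage sketch (illustrative size): reserve a page-aligned virtual range up
 * front, e.g. for a caller that programs the MMU entries itself later.
 *
 *     const void *block = NULL;
 *     ESP_ERROR_CHECK(esp_mmu_map_reserve_block_with_caps(
 *         2 * CONFIG_MMU_PAGE_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &block));
 */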
IRAM_ATTR esp_err_t esp_mmu_paddr_find_caps(const esp_paddr_t paddr, mmu_mem_caps_t *out_caps)
{
    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    bool found = false;
    mem_block_t *found_block = NULL;
    if (out_caps == NULL) {
        return ESP_ERR_INVALID_ARG;
    }

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        region = &s_mmu_ctx.mem_regions[i];
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                continue;
            }
            if (paddr >= mem_block->paddr_start && paddr < mem_block->paddr_end) {
                found = true;
                found_block = mem_block;
                break;
            }
        }
    }

    if (!found) {
        return ESP_ERR_NOT_FOUND;
    }
    *out_caps = found_block->caps;
    return ESP_OK;
}
static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, uint32_t size)
{
#if CONFIG_IDF_TARGET_ESP32
    /* ... */
    cache_sync(); /* ... */
#else
    cache_hal_invalidate_addr(vaddr_start, size);
#endif
}
#if SOC_MMU_PER_EXT_MEM_TARGET
FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    uint32_t actual_mapped_len = 0;
    uint32_t mmu_id = s_get_mmu_id_from_target(target);
    mmu_hal_map_region(mmu_id, target, vaddr_start, paddr_start, size, &actual_mapped_len);
    return actual_mapped_len;
}
/* ... */
#else
FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    uint32_t actual_mapped_len = 0;
    mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
    mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#endif /* ... */
#endif
    return actual_mapped_len;
}
/* ... */
#endif
static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    /* ... */
    spi_flash_disable_interrupts_caches_and_other_cpu();
    uint32_t actual_mapped_len = s_mapping_operation(target, vaddr_start, paddr_start, size);

    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
    bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(1, bus_mask); /* ... */
#endif

    s_do_cache_invalidate(vaddr_start, size);
    spi_flash_enable_interrupts_caches_and_other_cpu();
    ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr)
{
    esp_err_t ret = ESP_FAIL;
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
#if !SOC_SPIRAM_SUPPORTED || CONFIG_IDF_TARGET_ESP32
    ESP_RETURN_ON_FALSE(!(target & MMU_TARGET_PSRAM0), ESP_ERR_NOT_SUPPORTED, TAG, "PSRAM is not supported");
#endif
    ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be rounded up to the nearest multiple of CONFIG_MMU_PAGE_SIZE");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
    mem_block_t *dummy_head = NULL;
    mem_block_t *dummy_tail = NULL;
    mem_block_t *new_block = NULL;

    if (TAILQ_EMPTY(&found_region->mem_block_head)) {
        dummy_head = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_head, ESP_ERR_NO_MEM, err, TAG, "no mem");
        dummy_head->laddr_start = found_region->free_head;
        dummy_head->laddr_end = found_region->free_head;
        dummy_head->size = 0;
        dummy_head->caps = caps;
        TAILQ_INSERT_HEAD(&found_region->mem_block_head, dummy_head, entries);

        dummy_tail = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_tail, ESP_ERR_NO_MEM, err, TAG, "no mem");
        dummy_tail->laddr_start = found_region->end;
        dummy_tail->laddr_end = found_region->end;
        dummy_tail->size = 0;
        dummy_tail->caps = caps;
        TAILQ_INSERT_TAIL(&found_region->mem_block_head, dummy_tail, entries);
    }

    mem_block_t *mem_block = NULL;
#if ENABLE_PADDR_CHECK
    bool is_enclosed = false;
    bool is_overlapped = false;
    bool allow_overlap = flags & ESP_MMU_MMAP_FLAG_PADDR_SHARED;
    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        if (target == mem_block->target) {
            if (s_is_enclosed(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size)) {
                is_enclosed = true;
                break;
            }
            if (!allow_overlap && s_is_overlapped(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size)) {
                is_overlapped = true;
                break;
            }
        }
    }

    if (is_enclosed) {
        ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
        /* ... */
        const uint32_t new_paddr_offset = paddr_start - mem_block->paddr_start;
        *out_ptr = (void *)(mem_block->vaddr_start + new_paddr_offset);
        return ESP_ERR_INVALID_STATE;
    }

    if (!allow_overlap && is_overlapped) {
        ESP_LOGE(TAG, "paddr block is overlapped with an already mapped paddr block");
        return ESP_ERR_INVALID_ARG;
    }
/* ... */
#endif

    new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    ESP_GOTO_ON_FALSE(new_block, ESP_ERR_NO_MEM, err, TAG, "no mem");

    bool found = false;
    uint32_t last_end = TAILQ_FIRST(&found_region->mem_block_head)->laddr_end;
    size_t slot_len = 0;
    size_t max_slot_len = 0;
    mem_block_t *found_block = NULL;
    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        slot_len = mem_block->laddr_start - last_end;
        if (!found) {
            if (slot_len >= aligned_size) {
                found = true;
                found_block = mem_block;
                slot_len -= aligned_size;
                new_block->laddr_start = last_end;
            }
        }
        max_slot_len = (slot_len > max_slot_len) ? slot_len : max_slot_len;
        last_end = mem_block->laddr_end;
    }
    assert(found);
    TAILQ_INSERT_BEFORE(found_block, new_block, entries);
    found_region->max_slot_size = max_slot_len;

    new_block->laddr_end = new_block->laddr_start + aligned_size;
    new_block->size = aligned_size;
    new_block->caps = caps;
    new_block->paddr_start = paddr_start;
    new_block->paddr_end = paddr_start + aligned_size;
    new_block->target = target;
    if (caps & MMU_MEM_CAP_EXEC) {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION, target);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION, target);
    } else {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA, target);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA, target);
    }

    s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
    *out_ptr = (void *)new_block->vaddr_start;
    return ESP_OK;

err:
    if (dummy_tail) {
        free(dummy_tail);
    }
    if (dummy_head) {
        free(dummy_head);
    }
    return ret;
}
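/*
 * Usage sketch (illustrative; the 2 MB offset and 64 KB size are made up):
 * map a flash region for byte access, use it, then unmap it.
 *
 *     void *ptr = NULL;
 *     ESP_ERROR_CHECK(esp_mmu_map(0x200000, 0x10000, MMU_TARGET_FLASH0,
 *                                 MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, 0, &ptr));
 *     // ... read through ptr ...
 *     ESP_ERROR_CHECK(esp_mmu_unmap(ptr));
 *
 * paddr_start must be page-aligned; size is rounded up to whole
 * CONFIG_MMU_PAGE_SIZE pages.
 */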
#if SOC_MMU_PER_EXT_MEM_TARGET
FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size)
{
    mmu_target_t target = mmu_ll_vaddr_to_target(vaddr_start);
    uint32_t mmu_id = s_get_mmu_id_from_target(target);
    mmu_hal_unmap_region(mmu_id, vaddr_start, size);
}
/* ... */
#else
FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size)
{
    mmu_hal_unmap_region(0, vaddr_start, size);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
    mmu_hal_unmap_region(1, vaddr_start, size);
#endif /* ... */
#endif
}
/* ... */
#endif
static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size)
{
    /* ... */
    spi_flash_disable_interrupts_caches_and_other_cpu();
    s_unmapping_operation(vaddr_start, size);
    spi_flash_enable_interrupts_caches_and_other_cpu();
}
esp_err_t esp_mmu_unmap(void *ptr)
{
    ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
    size_t slot_len = 0;

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        ESP_COMPILER_DIAGNOSTIC_PUSH_IGNORE("-Wanalyzer-out-of-bounds")
        if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
            region = &s_mmu_ctx.mem_regions[i];
        }
        ESP_COMPILER_DIAGNOSTIC_POP("-Wanalyzer-out-of-bounds")
    }
    ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");

    bool found = false;
    mem_block_t *found_block = NULL;
    TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
        if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
            continue;
        }
        if (mem_block->laddr_start == ptr_laddr) {
            slot_len = TAILQ_NEXT(mem_block, entries)->laddr_start - TAILQ_PREV(mem_block, mem_block_head_, entries)->laddr_end;
            region->max_slot_size = (slot_len > region->max_slot_size) ? slot_len : region->max_slot_size;
            found = true;
            found_block = mem_block;
            break;
        }
    }
    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");

    s_do_unmapping(found_block->vaddr_start, found_block->size);
    TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
    free(found_block);
    return ESP_OK;
}
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)
{
    char line[100];
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        fprintf(stream, "region %d:\n", i);
        fprintf(stream, "%-15s %-14s %-14s %-12s %-12s %-12s\n", "Bus ID", "Start", "Free Head", "End", "Caps", "Max Slot Size");
        char *buf = line;
        size_t len = sizeof(line);
        memset(line, 0x0, len);
        snprintf(buf, len, "0x%-13x 0x%-12"PRIx32" 0x%-11"PRIx32" 0x%-10"PRIx32" 0x%-10x 0x%-8x\n",
                 s_mmu_ctx.mem_regions[i].bus_id,
                 s_mmu_ctx.mem_regions[i].start,
                 s_mmu_ctx.mem_regions[i].free_head,
                 s_mmu_ctx.mem_regions[i].end,
                 s_mmu_ctx.mem_regions[i].caps,
                 s_mmu_ctx.mem_regions[i].max_slot_size);
        fputs(line, stream);

        fprintf(stream, "mapped blocks:\n");
        fprintf(stream, "%-4s %-13s %-12s %-12s %-6s %-13s %-11s\n", "ID", "Vaddr Start", "Vaddr End", "Block Size", "Caps", "Paddr Start", "Paddr End");
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        int id = 0;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                snprintf(buf, len, "%-4d 0x%-11x 0x%-10x 0x%-10x 0x%-4x 0x%-11"PRIx32" 0x%-8"PRIx32"\n",
                         id,
                         mem_block->vaddr_start,
                         mem_block->vaddr_end,
                         mem_block->size,
                         mem_block->caps,
                         mem_block->paddr_start,
                         mem_block->paddr_end);
                fputs(line, stream);
                id++;
            }
        }
        fprintf(stream, "\n");
    }
    return ESP_OK;
}
/* ... */
esp_err_t IRAM_ATTR esp_mmu_map_dump_mapped_blocks_private(void)
{
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                ESP_DRAM_LOGI(TAG, "block vaddr_start: 0x%x", mem_block->vaddr_start);
                ESP_DRAM_LOGI(TAG, "block vaddr_end: 0x%x", mem_block->vaddr_end);
                ESP_DRAM_LOGI(TAG, "block size: 0x%x", mem_block->size);
                ESP_DRAM_LOGI(TAG, "block caps: 0x%x", mem_block->caps);
                ESP_DRAM_LOGI(TAG, "block paddr_start: 0x%x", mem_block->paddr_start);
                ESP_DRAM_LOGI(TAG, "block paddr_end: 0x%x", mem_block->paddr_end);
            }
        }
        ESP_DRAM_LOGI(TAG, "region bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
        ESP_DRAM_LOGI(TAG, "region start: 0x%x", s_mmu_ctx.mem_regions[i].start);
        ESP_DRAM_LOGI(TAG, "region end: 0x%x", s_mmu_ctx.mem_regions[i].end);
        ESP_DRAM_LOGI(TAG, "region caps: 0x%x", s_mmu_ctx.mem_regions[i].caps);
    }
    return ESP_OK;
}
/* ... */
static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
#if SOC_MMU_PER_EXT_MEM_TARGET
    if (!is_mapped) {
        is_mapped = mmu_hal_vaddr_to_paddr(1, vaddr, out_paddr, out_target);
    }
#endif /* ... */
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return is_mapped;
}
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    ESP_RETURN_ON_FALSE(vaddr && out_paddr && out_target, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_FALSE(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");

    esp_paddr_t paddr = 0;
    mmu_target_t target = 0;
    bool is_mapped = s_vaddr_to_paddr((uint32_t)vaddr, &paddr, &target);
    ESP_RETURN_ON_FALSE(is_mapped, ESP_ERR_NOT_FOUND, TAG, "vaddr isn't mapped");

    *out_paddr = paddr;
    *out_target = target;
    return ESP_OK;
}
static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    uint32_t mmu_id = 0;
#if SOC_MMU_PER_EXT_MEM_TARGET
    mmu_id = s_get_mmu_id_from_target(target);
#endif
    bool found = mmu_hal_paddr_to_vaddr(mmu_id, paddr, target, type, out_vaddr);
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return found;
}
esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr)
{
    ESP_RETURN_ON_FALSE(out_vaddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t vaddr = 0;
    bool found = false;
    found = s_paddr_to_vaddr(paddr, target, type, &vaddr);
    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "paddr isn't mapped");

    *out_vaddr = (void *)vaddr;
    return ESP_OK;
}
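/*
 * Round-trip sketch (illustrative): translate a mapped data pointer to its
 * physical address and back.
 *
 *     esp_paddr_t paddr = 0;
 *     mmu_target_t target = MMU_TARGET_FLASH0;
 *     ESP_ERROR_CHECK(esp_mmu_vaddr_to_paddr(ptr, &paddr, &target));
 *     void *vaddr = NULL;
 *     ESP_ERROR_CHECK(esp_mmu_paddr_to_vaddr(paddr, target, MMU_VADDR_DATA, &vaddr));
 *
 * For a data mapping, vaddr should land back inside the same mapped block
 * that ptr points into.
 */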
#if ENABLE_PADDR_CHECK
/* ... */
/* ... */
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    uint32_t new_block_end = new_block_start + new_block_size;
    return (new_block_start >= block_start) && (new_block_end <= block_end);
}
/* ... */
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    uint32_t new_block_end = new_block_start + new_block_size;
    return ((new_block_start < block_start) && (new_block_end > block_start)) ||
           ((new_block_start < block_end) && (new_block_end > block_end));
}
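/*
 * Worked example (illustrative numbers): for an existing block covering
 * [0x1000, 0x3000):
 *
 *     s_is_enclosed(0x1000, 0x3000, 0x1800, 0x800)    -> true   (new block [0x1800, 0x2000))
 *     s_is_overlapped(0x1000, 0x3000, 0x800, 0x1000)  -> true   (new block [0x800, 0x1800))
 *     s_is_overlapped(0x1000, 0x3000, 0x1800, 0x800)  -> false  (enclosed, not partially overlapped)
 */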
#endif /* ... */