/* ... */
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_psram.h"
#include "esp_private/esp_psram_extram.h"
#include "esp32/rom/cache.h"
#include "sdkconfig.h"
#include "esp32/himem.h"
#include "soc/soc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "../esp_psram_impl.h"11 includes
/* ... */
#if CONFIG_SPIRAM_BANKSWITCH_ENABLE
#define SPIRAM_BANKSWITCH_RESERVE CONFIG_SPIRAM_BANKSWITCH_RESERVE
#else
#define SPIRAM_BANKSWITCH_RESERVE 0
#endif
#define CACHE_BLOCKSIZE (32*1024)
#define VIRT_HIMEM_RANGE_START (SOC_EXTRAM_DATA_LOW+(128-SPIRAM_BANKSWITCH_RESERVE)*CACHE_BLOCKSIZE)
#define VIRT_HIMEM_RANGE_BLOCKSTART (128-SPIRAM_BANKSWITCH_RESERVE)
#define PHYS_HIMEM_BLOCKSTART (128-SPIRAM_BANKSWITCH_RESERVE)
#define TAG "esp_himem"
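
/* Bookkeeping for himem: one descriptor per 32 KB physical block, and one
   per 32 KB slot of the reserved bank-switch address window. */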
typedef struct {
    unsigned int is_alloced: 1;
    unsigned int is_mapped: 1;
} ramblock_t;

typedef struct {
    unsigned int is_alloced: 1;
    unsigned int is_mapped: 1;
    unsigned int ram_block: 16;
} rangeblock_t;

static ramblock_t *s_ram_descriptor = NULL;
static rangeblock_t *s_range_descriptor = NULL;
static int s_ramblockcnt = 0;
static const int s_rangeblockcnt = SPIRAM_BANKSWITCH_RESERVE;

typedef struct esp_himem_rangedata_t {
    int block_ct;
    int block_start;
} esp_himem_rangedata_t;

typedef struct esp_himem_ramdata_t {
    int block_ct;
    uint16_t *block;
} esp_himem_ramdata_t;

static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;

static inline int ramblock_idx_valid(int ramblock_idx)
{
    return (ramblock_idx >= 0 && ramblock_idx < s_ramblockcnt);
}

static inline int rangeblock_idx_valid(int rangeblock_idx)
{
    return (rangeblock_idx >= 0 && rangeblock_idx < s_rangeblockcnt);
}
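
/* Point 'ct' consecutive 32 KB MMU pages, starting at virtual bank
   'virt_bank', at physical PSRAM bank 'phys_bank'. The mapping is applied to
   both MMUs (cpu 0 = PRO, cpu 1 = APP) so both cores see the same memory. */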
static void set_bank(int virt_bank, int phys_bank, int ct)
{
    int r __attribute__((unused));
    r = cache_sram_mmu_set(0, 0, SOC_EXTRAM_DATA_LOW + CACHE_BLOCKSIZE * virt_bank, phys_bank * CACHE_BLOCKSIZE, 32, ct);
    assert(r == 0);
    r = cache_sram_mmu_set(1, 0, SOC_EXTRAM_DATA_LOW + CACHE_BLOCKSIZE * virt_bank, phys_bank * CACHE_BLOCKSIZE, 32, ct);
    assert(r == 0);
}
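
/* Amount of physical PSRAM that is only reachable through himem, i.e.
   everything above the region that is permanently mapped into the normal
   data address space. */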
size_t esp_himem_get_phys_size(void)
{
    int paddr_start = (4096 * 1024) - (CACHE_BLOCKSIZE * SPIRAM_BANKSWITCH_RESERVE);
    uint32_t psram_available_size = 0;
    esp_psram_impl_get_available_size(&psram_available_size);
    return psram_available_size - paddr_start;
}

size_t esp_himem_get_free_size(void)
{
    size_t ret = 0;
    for (int i = 0; i < s_ramblockcnt; i++) {
        if (!s_ram_descriptor[i].is_alloced) {
            ret += CACHE_BLOCKSIZE;
        }
    }
    return ret;
}

size_t esp_himem_reserved_area_size(void)
{
    return CACHE_BLOCKSIZE * SPIRAM_BANKSWITCH_RESERVE;
}

#if SPIRAM_BANKSWITCH_RESERVE > 0
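/* Runs as a constructor during startup: sizes the himem region from the
   detected PSRAM and allocates the block descriptors. On allocation failure
   it logs an error and leaves himem unavailable rather than aborting. */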
void __attribute__((constructor)) esp_himem_init(void)
{
    uint32_t maxram = 0;
    esp_psram_impl_get_available_size(&maxram);
    ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL,, TAG, "already initialized");
    ESP_RETURN_ON_FALSE(s_range_descriptor == NULL,, TAG, "already initialized");
    ESP_RETURN_ON_FALSE(SPIRAM_BANKSWITCH_RESERVE != 0,, TAG, "No banks reserved for himem");
    int paddr_start = (4096 * 1024) - (CACHE_BLOCKSIZE * SPIRAM_BANKSWITCH_RESERVE);
    int paddr_end = maxram;
    s_ramblockcnt = ((paddr_end - paddr_start) / CACHE_BLOCKSIZE);
    s_ram_descriptor = calloc(s_ramblockcnt, sizeof(ramblock_t));
    s_range_descriptor = calloc(SPIRAM_BANKSWITCH_RESERVE, sizeof(rangeblock_t));
    if (s_ram_descriptor == NULL || s_range_descriptor == NULL) {
        ESP_EARLY_LOGE(TAG, "Cannot allocate memory for meta info. Not initializing!");
        free(s_ram_descriptor);
        free(s_range_descriptor);
        return;
    }
    ESP_EARLY_LOGI(TAG, "Initialized. Using last %d 32KB address blocks for bank switching on %d KB of physical memory.",
                   SPIRAM_BANKSWITCH_RESERVE, (paddr_end - paddr_start) / 1024);
}
/* ... */
#endif
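
/* Greedy scan for 'count' free physical blocks (not necessarily contiguous).
   Their indices are written to blocks_out and the blocks are marked as
   allocated. Returns false if not enough free blocks exist; the caller must
   hold 'spinlock'. */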
static bool allocate_blocks(int count, uint16_t *blocks_out)
{
    int n = 0;
    for (int i = 0; i < s_ramblockcnt && n != count; i++) {
        if (!s_ram_descriptor[i].is_alloced) {
            blocks_out[n] = i;
            n++;
        }
    }
    if (n == count) {
        for (int i = 0; i < count; i++) {
            s_ram_descriptor[blocks_out[i]].is_alloced = true;
            assert(s_ram_descriptor[blocks_out[i]].is_mapped == false);
        }
        return true;
    } else {
        return false;
    }
}
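
/* Allocate 'size' bytes (a multiple of CACHE_BLOCKSIZE) of physical himem.
   The memory is not accessible until it is mapped into an address range with
   esp_himem_map(). */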
esp_err_t esp_himem_alloc(size_t size, esp_himem_handle_t *handle_out)
{
    if (size % CACHE_BLOCKSIZE != 0) {
        return ESP_ERR_INVALID_SIZE;
    }
    int blocks = size / CACHE_BLOCKSIZE;
    esp_himem_ramdata_t *r = calloc(1, sizeof(esp_himem_ramdata_t));
    if (!r) {
        goto nomem;
    }
    r->block = calloc(blocks, sizeof(uint16_t));
    if (!r->block) {
        goto nomem;
    }
    portENTER_CRITICAL(&spinlock);
    int ok = allocate_blocks(blocks, r->block);
    portEXIT_CRITICAL(&spinlock);
    if (!ok) {
        goto nomem;
    }
    r->block_ct = blocks;
    *handle_out = r;
    return ESP_OK;
nomem:
    if (r) {
        free(r->block);
    }
    free(r);
    return ESP_ERR_NO_MEM;
}
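
/* Release a physical himem allocation. Fails if any of its blocks is still
   mapped into an address range. */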
esp_err_t esp_himem_free(esp_himem_handle_t handle)
{
    for (int i = 0; i < handle->block_ct; i++) {
        assert(ramblock_idx_valid(handle->block[i]));
        ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i]].is_mapped, ESP_ERR_INVALID_ARG, TAG, "block in range still mapped");
    }
    portENTER_CRITICAL(&spinlock);
    for (int i = 0; i < handle->block_ct; i++) {
        s_ram_descriptor[handle->block[i]].is_alloced = false;
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle->block);
    free(handle);
    return ESP_OK;
}
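
/* Reserve a contiguous window of 'size' bytes in the bank-switch address
   range. Physical himem blocks can later be swapped into this window with
   esp_himem_map(). */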
esp_err_t esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t *handle_out)
{
    ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
    ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
    int blocks = size / CACHE_BLOCKSIZE;
    esp_himem_rangedata_t *r = calloc(1, sizeof(esp_himem_rangedata_t));
    if (!r) {
        return ESP_ERR_NO_MEM;
    }
    r->block_ct = blocks;
    r->block_start = -1;
    int start_free = 0;
    portENTER_CRITICAL(&spinlock);
    // Look for a run of 'blocks' contiguous free range blocks.
    for (int i = 0; i < s_rangeblockcnt; i++) {
        if (s_range_descriptor[i].is_alloced) {
            start_free = i + 1; // run broken; the candidate start is the next block
        } else if (i - start_free == blocks - 1) {
            r->block_start = start_free;
            break;
        }
    }
    if (r->block_start == -1) {
        free(r);
        portEXIT_CRITICAL(&spinlock);
        return ESP_ERR_NO_MEM;
    }
    for (int i = 0; i < blocks; i++) {
        s_range_descriptor[r->block_start + i].is_alloced = 1;
    }
    portEXIT_CRITICAL(&spinlock);
    *handle_out = r;
    return ESP_OK;
}
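
/* Release a mapping window. Fails if physical memory is still mapped into
   any part of the range. */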
esp_err_t esp_himem_free_map_range(esp_himem_rangehandle_t handle)
{
    for (int i = 0; i < handle->block_ct; i++) {
        assert(rangeblock_idx_valid(handle->block_start + i));
        assert(s_range_descriptor[i + handle->block_start].is_alloced == 1);
        ESP_RETURN_ON_FALSE(!s_range_descriptor[i + handle->block_start].is_mapped, ESP_ERR_INVALID_ARG, TAG, "memory still mapped to range");
    }
    portENTER_CRITICAL(&spinlock);
    for (int i = 0; i < handle->block_ct; i++) {
        s_range_descriptor[i + handle->block_start].is_alloced = 0;
    }
    portEXIT_CRITICAL(&spinlock);
    free(handle);
    return ESP_OK;
}
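
/* Map 'len' bytes of the physical allocation 'handle', starting at
   'ram_offset', into the window 'range' at 'range_offset'. All offsets and
   the length must be multiples of CACHE_BLOCKSIZE; 'flags' is accepted but
   not used by this implementation. *out_ptr receives the virtual address of
   the mapping. */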
esp_err_t esp_himem_map(esp_himem_handle_t handle, esp_himem_rangehandle_t range, size_t ram_offset, size_t range_offset, size_t len, int flags, void **out_ptr)
{
    int ram_block = ram_offset / CACHE_BLOCKSIZE;
    int range_block = range_offset / CACHE_BLOCKSIZE;
    int blockcount = len / CACHE_BLOCKSIZE;
    ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
    // All arguments have to be block-aligned and within the two handles.
    ESP_RETURN_ON_FALSE(ram_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "ram offset not aligned to blocksize");
    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range not aligned to blocksize");
    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "length not aligned to blocksize");
    ESP_RETURN_ON_FALSE(ram_block + blockcount <= handle->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of phys ram handle");
    ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of range handle");
    for (int i = 0; i < blockcount; i++) {
        ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i + ram_block]].is_mapped, ESP_ERR_INVALID_STATE, TAG, "ram already mapped");
        ESP_RETURN_ON_FALSE(!s_range_descriptor[range->block_start + i + range_block].is_mapped, ESP_ERR_INVALID_STATE, TAG, "range already mapped");
    }
    // Update the bookkeeping under the spinlock...
    portENTER_CRITICAL(&spinlock);
    for (int i = 0; i < blockcount; i++) {
        assert(ramblock_idx_valid(handle->block[i + ram_block]));
        s_ram_descriptor[handle->block[i + ram_block]].is_mapped = 1;
        s_range_descriptor[range->block_start + i + range_block].is_mapped = 1;
        s_range_descriptor[range->block_start + i + range_block].ram_block = handle->block[i + ram_block];
    }
    portEXIT_CRITICAL(&spinlock);
    // ...then program the MMU, one 32 KB bank at a time.
    for (int i = 0; i < blockcount; i++) {
        set_bank(VIRT_HIMEM_RANGE_BLOCKSTART + range->block_start + i + range_block, handle->block[i + ram_block] + PHYS_HIMEM_BLOCKSTART, 1);
    }
    *out_ptr = (void *)(VIRT_HIMEM_RANGE_START + (range->block_start + range_block) * CACHE_BLOCKSIZE);
    return ESP_OK;
}
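
/* Unmap a region previously mapped with esp_himem_map(). The cache is
   written back so that modified data reaches physical PSRAM; the MMU entries
   themselves are left in place and are simply overwritten by the next
   mapping into this range. */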
esp_err_t esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
{
    int range_offset = (uint32_t)ptr - VIRT_HIMEM_RANGE_START;
    int range_block = (range_offset / CACHE_BLOCKSIZE) - range->block_start;
    int blockcount = len / CACHE_BLOCKSIZE;
    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range offset not block-aligned");
    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "map length not block-aligned");
    ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_ARG, TAG, "range out of bounds for handle");
    portENTER_CRITICAL(&spinlock);
    for (int i = 0; i < blockcount; i++) {
        int ramblock = s_range_descriptor[range->block_start + i + range_block].ram_block;
        assert(ramblock_idx_valid(ramblock));
        s_ram_descriptor[ramblock].is_mapped = 0;
        s_range_descriptor[range->block_start + i + range_block].is_mapped = 0;
    }
    // Flush the cache so any dirty lines land in physical PSRAM before the
    // window can be pointed at different memory.
    esp_psram_extram_writeback_cache();
    portEXIT_CRITICAL(&spinlock);
    return ESP_OK;
}
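
/*
 * Usage sketch (illustrative only, not part of this driver's build):
 * allocate one 32 KB himem block, map it through a reserved window, access
 * it, then tear everything down. 'example_use_himem' is a hypothetical name,
 * and error handling is reduced to ESP_ERROR_CHECK for brevity.
 *
 *     static void example_use_himem(void)
 *     {
 *         esp_himem_handle_t mem;
 *         esp_himem_rangehandle_t range;
 *         uint32_t *ptr;
 *         ESP_ERROR_CHECK(esp_himem_alloc(CACHE_BLOCKSIZE, &mem));
 *         ESP_ERROR_CHECK(esp_himem_alloc_map_range(CACHE_BLOCKSIZE, &range));
 *         ESP_ERROR_CHECK(esp_himem_map(mem, range, 0, 0, CACHE_BLOCKSIZE, 0, (void **)&ptr));
 *         ptr[0] = 0x12345678;  // this write goes to PSRAM above the directly mapped region
 *         ESP_ERROR_CHECK(esp_himem_unmap(range, ptr, CACHE_BLOCKSIZE));
 *         ESP_ERROR_CHECK(esp_himem_free_map_range(range));
 *         ESP_ERROR_CHECK(esp_himem_free(mem));
 *     }
 */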