Mirror of https://github.com/fail0verflow/switch-coreboot.git, synced 2025-05-04 01:39:18 -04:00
I noticed that free regions provided by search_global_resources() don't have the reserved regions subtracted from them. This patch introduces a check to weed them out, splitting when necessary.

Signed-off-by: Robert Millan <rmh@aybabtu.com>
Acked-by: Jordan Crouse <jordan.crouse@amd.com>

git-svn-id: svn://coreboot.org/repository/coreboot-v3@937 f3766cd6-281f-0410-b1cd-43a5c92072e9
parent 5d37f8595c
commit 20621bdadb
1 changed file with 57 additions and 16 deletions
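For context on the structures this hunk fills in: each Multiboot memory-map entry carries a size field (counting the bytes that follow it), a 64-bit base address, a 64-bit length, and a type, where type 1 means available RAM and other values mean reserved; the info structure's mmap_addr/mmap_length and the MB_INFO_MEM_MAP flag tell the payload where the map lives. Below is a minimal consumer-side sketch, not part of the patch: the struct, function names, and fake entries are invented for illustration, though the field names mirror the ones the diff uses.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: it mirrors the fields the patch fills in
 * (size, addr, len, type) and the Multiboot memory-map format, not
 * coreboot's actual header. */
struct mb_mmap_entry {
	uint32_t size;   /* bytes in this entry, excluding 'size' itself */
	uint64_t addr;   /* region base */
	uint64_t len;    /* region length */
	uint32_t type;   /* 1 = available RAM, anything else = reserved */
} __attribute__((packed));

/* Walk a memory map the way a Multiboot payload typically does.  On real
 * hardware the buffer and length would come from mbi->mmap_addr and
 * mbi->mmap_length, which write_multiboot_info() sets up below. */
static void dump_mmap(const void *mmap, uint32_t mmap_length)
{
	const struct mb_mmap_entry *e = mmap;

	while ((uintptr_t)e < (uintptr_t)mmap + mmap_length) {
		printf("0x%08llx + 0x%08llx  %s\n",
		       (unsigned long long)e->addr,
		       (unsigned long long)e->len,
		       e->type == 1 ? "available" : "reserved");
		/* 'size' does not count itself, so step over it explicitly. */
		e = (const struct mb_mmap_entry *)((uintptr_t)e + e->size + sizeof(e->size));
	}
}

int main(void)
{
	/* Two fabricated entries, just to exercise the walk. */
	static const struct mb_mmap_entry fake[2] = {
		{ sizeof(struct mb_mmap_entry) - sizeof(uint32_t), 0x00000000, 0x0009F000, 1 },
		{ sizeof(struct mb_mmap_entry) - sizeof(uint32_t), 0x000F0000, 0x00010000, 2 },
	};

	dump_mmap(fake, sizeof(fake));
	return 0;
}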
@@ -25,42 +25,83 @@
 static struct multiboot_mmap_entry *mb_mem;
 
-static void build_mb_mem_range(void *gp, struct device *dev, struct resource *res)
+static struct {
+	u64 addr;
+	u64 len;
+} reserved_mem[2];
+
+static void build_mb_mem_range_nooverlap(u64 addr, u64 len)
 {
-	mb_mem->addr = res->base;
-	mb_mem->len = res->size;
+	int i;
+	for (i = 0; i < sizeof(reserved_mem) / sizeof(reserved_mem[0]); i++) {
+		/* free region fully contained in reserved region, abort */
+		if (addr >= reserved_mem[i].addr && addr + len <= reserved_mem[i].addr + reserved_mem[i].len)
+			return;
+		/* reserved region splits free region */
+		if (addr < reserved_mem[i].addr && addr + len > reserved_mem[i].addr + reserved_mem[i].len) {
+			build_mb_mem_range_nooverlap(addr, reserved_mem[i].addr - addr);
+			build_mb_mem_range_nooverlap(reserved_mem[i].addr + reserved_mem[i].len, (addr + len) - (reserved_mem[i].addr + reserved_mem[i].len));
+			return;
+		}
+		/* left overlap */
+		if (addr < reserved_mem[i].addr + reserved_mem[i].len && addr + len > reserved_mem[i].addr + reserved_mem[i].len) {
+			len += addr;
+			addr = reserved_mem[i].addr + reserved_mem[i].len;
+			len -= addr;
+			/* len += addr - old_addr */
+			continue;
+		}
+		/* right overlap */
+		if (addr < reserved_mem[i].addr && addr + len > reserved_mem[i].addr) {
+			len = reserved_mem[i].addr - addr;
+			continue;
+		}
+		/* none of the above, just add it */
+	}
+
+	mb_mem->addr = addr;
+	mb_mem->len = len;
 	mb_mem->type = 1;
 	mb_mem->size = sizeof(*mb_mem) - sizeof(mb_mem->size);
 	mb_mem++;
 }
 
+static void build_mb_mem_range(void *gp, struct device *dev, struct resource *res)
+{
+	build_mb_mem_range_nooverlap(res->base, res->size);
+}
+
 unsigned long write_multiboot_info(
 	unsigned long low_table_start, unsigned long low_table_end,
 	unsigned long rom_table_start, unsigned long rom_table_end)
 {
-	struct multiboot_info *mbi = rom_table_end;
+	struct multiboot_info *mbi;
+	int i;
+
+	mbi = rom_table_end;
 	memset(mbi, 0, sizeof(*mbi));
 	rom_table_end += sizeof(*mbi);
 
 	mbi->mmap_addr = (u32) rom_table_end;
 	mb_mem = rom_table_end;
 
+	/* reserved regions */
+	reserved_mem[0].addr = low_table_start;
+	reserved_mem[0].len = low_table_end - low_table_start;
+	reserved_mem[1].addr = rom_table_start;
+	reserved_mem[1].len = rom_table_end - rom_table_start;
+	for (i = 0; i < sizeof(reserved_mem) / sizeof(reserved_mem[0]); i++) {
+		mb_mem->addr = reserved_mem[i].addr;
+		mb_mem->len = reserved_mem[i].len;
+		mb_mem->type = 2;
+		mb_mem->size = sizeof(*mb_mem) - sizeof(mb_mem->size);
+		mb_mem++;
+	}
+
 	/* free regions */
 	search_global_resources( IORESOURCE_MEM | IORESOURCE_CACHEABLE,
 		IORESOURCE_MEM | IORESOURCE_CACHEABLE, build_mb_mem_range, NULL);
 
-	/* reserved regions */
-	mb_mem->addr = low_table_start;
-	mb_mem->len = low_table_end - low_table_start;
-	mb_mem->type = 2;
-	mb_mem->size = sizeof(*mb_mem) - sizeof(mb_mem->size);
-	mb_mem++;
-	mb_mem->addr = rom_table_start;
-	mb_mem->len = rom_table_end - rom_table_start;
-	mb_mem->type = 2;
-	mb_mem->size = sizeof(*mb_mem) - sizeof(mb_mem->size);
-	mb_mem++;
-
 	mbi->mmap_length = ((u32) mb_mem) - mbi->mmap_addr;
 	mbi->flags |= MB_INFO_MEM_MAP;
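To see how the new clipping behaves, here is a small host-side sketch (not part of the commit) that applies the same four overlap cases to a pair of made-up reserved regions and prints the surviving free ranges. The region values are invented, and the left-overlap arithmetic is written in the equivalent direct form rather than the patch's add/subtract sequence.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the reserved regions that
 * write_multiboot_info() registers. */
static struct { uint64_t addr, len; } reserved_mem[2] = {
	{ 0x00000500, 0x00000B00 },   /* e.g. low tables at 0x500..0x1000 */
	{ 0x000F0000, 0x00010000 },   /* e.g. ROM tables at 0xF0000..0x100000 */
};

/* Same structure as build_mb_mem_range_nooverlap(), but emitting printf()
 * output instead of multiboot entries. */
static void emit_free_range(uint64_t addr, uint64_t len)
{
	int i;

	for (i = 0; i < (int)(sizeof(reserved_mem) / sizeof(reserved_mem[0])); i++) {
		uint64_t r_start = reserved_mem[i].addr;
		uint64_t r_end = reserved_mem[i].addr + reserved_mem[i].len;

		/* free region fully contained in reserved region: drop it */
		if (addr >= r_start && addr + len <= r_end)
			return;

		/* reserved region splits free region: recurse on both halves */
		if (addr < r_start && addr + len > r_end) {
			emit_free_range(addr, r_start - addr);
			emit_free_range(r_end, (addr + len) - r_end);
			return;
		}

		/* left overlap: trim the front of the free region */
		if (addr < r_end && addr + len > r_end) {
			len = (addr + len) - r_end;
			addr = r_end;
			continue;
		}

		/* right overlap: trim the tail of the free region */
		if (addr < r_start && addr + len > r_start) {
			len = r_start - addr;
			continue;
		}
	}

	printf("free: 0x%08llx - 0x%08llx\n",
	       (unsigned long long)addr, (unsigned long long)(addr + len));
}

int main(void)
{
	/* One large free region covering both reserved holes: the output is
	 * three available ranges with the holes punched out. */
	emit_free_range(0x00000000, 0x00200000);
	return 0;
}

With a single free region covering 0x0-0x200000 and reserved holes at 0x500-0x1000 and 0xF0000-0x100000, this prints three available ranges, which is the splitting behavior the commit message describes.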