*/
#define INITRD_START (50*1024*1024)
+/* Default start address for kernel. */
+#define DEFAULT_KERNEL_START 0x100000
+
+
/*
* This version must match the one in the kernel.
*
/* 0xA0 */ UINT16 mca_info_len; /* LDR */
/* 0xA2 */ UINT8 mca_info_buf[0x10]; /* LDR */
-/* 0xB2 */ UINT8 unused_4[0x106]; /* unused */
-
-/* Address of the EFI system table. */
-/* 0x1B8 */ UINT64 efi_sys_tbl; /* LDR */
+/* 0xB2 */ UINT8 unused_4[0x10E]; /* unused */
/* EFI boot loader signature. */
/* 0x1C0 */ UINT8 efi_loader_sig[4]; /* LDR */
-#define EFI_LOADER_SIG "EFIL"
+#define EFI_LOADER_SIG_X64 "EL64"
+
+/* Address of the EFI system table. */
+/* 0x1C4 */ UINT32 efi_sys_tbl; /* LDR */
/* EFI memory descriptor size. */
-/* 0x1C4 */ UINT32 efi_mem_desc_size; /* LDR */
+/* 0x1C8 */ UINT32 efi_mem_desc_size; /* LDR */
/* EFI memory descriptor version. */
-/* 0x1C8 */ UINT32 efi_mem_desc_ver; /* LDR */
+/* 0x1CC */ UINT32 efi_mem_desc_ver; /* LDR */
/* Address & size of EFI memory map. */
-/* 0x1CC */ UINT32 efi_mem_map_size; /* LDR */
-/* 0x1D0 */ UINT64 efi_mem_map; /* LDR */
+/* 0x1D0 */ UINT32 efi_mem_map; /* LDR */
+/* 0x1D4 */ UINT32 efi_mem_map_size; /* LDR */
-/* Address & size of loader. */
-/* 0x1D8 */ UINT32 loader_start; /* LDR */
-/* 0x1DC */ UINT32 loader_size; /* LDR */
+/* 0x1D8 */ UINT32 efi_sys_tbl_hi; /* LDR */
+/* 0x1DC */ UINT32 efi_mem_map_hi; /* LDR */
/* Available contiguous extended memory in KB. */
/* 0x1E0 */ UINT32 alt_mem_k; /* LDR */
UINT8 *t = (UINT8 *)(to); \
UINT8 *f = (UINT8 *)(from); \
UINTN n = cnt; \
- if (t && f && n) { \
+ if (t && f && n && (t<f)) { \
 while (n--) { \
 *t++ = *f++; \
 } \
+ } else if (t && f && n && (t>f)) { \
+ /* Overlapping backward copy (memmove semantics): start one */ \
+ /* past the last byte and pre-decrement, so the first write */ \
+ /* is t[n-1] and the last is t[0]. Post-decrement here would */ \
+ /* write t[n] (one past the end) and never copy t[0]. */ \
+ t += n; \
+ f += n; \
+ while (n--) { \
+ *--t = *--f; \
+ } \
 } \
}
extern VOID *kernel_start;
extern UINTN kernel_size;
+extern VOID *kernel_load_address;
extern VOID *initrd_start;
extern UINTN initrd_size;
UINT32 kernel_entry;
UINT16 kernel_cs;
} jumpvector;
- UINTN njump;
VOID *jump_start;
+ uint64_t temp;
/*
* Disable interrupts.
asm volatile ( "cli" : : );
/*
- * Relocate initrd, if present.
+ * Relocate kernel (if needed), and initrd (if present).
+ * Copy kernel first, in case kernel was loaded overlapping where we're
+ * planning to copy the initrd. This assumes that the initrd didn't
+ * get loaded overlapping where we're planning to copy the kernel, but
+ * that's pretty unlikely since we couldn't alloc that space for the
+ * kernel (or the kernel would already be there).
*/
+ if (kernel_start != kernel_load_address) {
+ MEMCPY(kernel_start, kernel_load_address, kernel_size);
+ }
if (bp->s.initrd_start) {
- MEMCPY(INITRD_START, bp->s.initrd_start, bp->s.initrd_size);
+ temp = bp->s.initrd_start;
+ MEMCPY(INITRD_START, temp , bp->s.initrd_size);
bp->s.initrd_start = INITRD_START;
}
/*
MEMCPY(high_base_mem, bp, 0x4000);
+ bp = (boot_params_t *)high_base_mem;
+ bp->s.cmdline_addr = high_base_mem + bp->s.cmdline_offset;
+
/*
* Initialize Linux GDT.
*/
/*
* Jump to kernel entry point.
+ *
+ * Cast is to tell gcc that we know we're going from
+ * 64-bit ptr to 32-bit integer.
*/
- jumpvector.kernel_entry=kentry;
+ jumpvector.kernel_entry=(UINT32)((UINT64)kentry);
jumpvector.kernel_cs=0x10;
- njump = &jumpvector;
jump_start = (VOID *)&jumpvector;
//asm volatile ( "mov %0, %%rcx" : : "m" (&jumpvector) );
asm volatile ( "mov %0, %%rcx" : : "m" (jump_start) );
- //asm volatile ( "mov %0, %%rcx" : : "m" (njump) );
asm volatile ( "ljmp *(%%rcx)" : :);
/* Never come back to here. */
}