*/
#define INITRD_START (50*1024*1024)
+/* Default start address for the kernel: 0x100000 (1 MiB), the traditional x86 protected-mode load address. */
+#define DEFAULT_KERNEL_START 0x100000
+
+
/*
* This version must match the one in the kernel.
*
UINT8 *t = (UINT8 *)(to); \
UINT8 *f = (UINT8 *)(from); \
UINTN n = cnt; \
- if (t && f && n) { \
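+ /* dest below src: a forward copy cannot clobber not-yet-read source bytes. */ \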
+ if (t && f && n && (t<f)) { \
while (n--) { \
*t++ = *f++; \
} \
+ } else if (t && f && n && (t>f)) { \
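+ /* dest above src: copy backwards from the end so the overlap is safe. */ \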
+ t += n; \
+ f += n; \
+ while (n--) { \
+ *--t = *--f; \
+ } \
} \
}
extern VOID *kernel_start;
extern UINTN kernel_size;
+extern VOID *kernel_load_address;
extern VOID *initrd_start;
extern UINTN initrd_size;
UINT32 kernel_entry;
UINT16 kernel_cs;
} jumpvector;
- UINTN njump;
VOID *jump_start;
+ UINT64 temp;
/*
* Disable interrupts.
asm volatile ( "cli" : : );
/*
- * Relocate initrd, if present.
+ * Relocate the kernel (if needed) and the initrd (if present).
+ * Copy the kernel first, in case it was loaded overlapping the region
+ * where we plan to copy the initrd. This assumes the initrd was not
+ * loaded overlapping the kernel's target region, which is unlikely:
+ * otherwise we could not have allocated that region for the kernel
+ * (or the kernel would already be there).
*/
+ if (kernel_start != kernel_load_address) {
+ MEMCPY(kernel_start, kernel_load_address, kernel_size);
+ }
if (bp->s.initrd_start) {
- MEMCPY(INITRD_START, bp->s.initrd_start, bp->s.initrd_size);
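+ /* bp->s.initrd_start is presumably narrower than a pointer; widen
+ * it to 64 bits so MEMCPY's cast to (UINT8 *) starts pointer-sized. */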
+ temp = bp->s.initrd_start;
+ MEMCPY(INITRD_START, temp, bp->s.initrd_size);
bp->s.initrd_start = INITRD_START;
}
/*
/*
* Jump to kernel entry point.
+ *
+ * The double cast tells gcc that we knowingly truncate a
+ * 64-bit pointer to a 32-bit integer.
*/
- jumpvector.kernel_entry=kentry;
+ jumpvector.kernel_entry=(UINT32)((UINT64)kentry);
jumpvector.kernel_cs=0x10;
- njump = &jumpvector;
jump_start = (VOID *)&jumpvector;
//asm volatile ( "mov %0, %%rcx" : : "m" (&jumpvector) );
asm volatile ( "mov %0, %%rcx" : : "m" (jump_start) );
- //asm volatile ( "mov %0, %%rcx" : : "m" (njump) );
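+ /*
+ * Indirect far jump: ljmp reads the 32-bit offset (kernel_entry) and
+ * 16-bit selector (kernel_cs) from the jumpvector. Selector 0x10 is
+ * assumed to name a 32-bit code segment in a GDT set up elsewhere.
+ * Note this relies on %rcx surviving between the two asm statements,
+ * which gcc does not strictly guarantee.
+ */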
asm volatile ( "ljmp *(%%rcx)" : :);
/* Never come back to here. */
}