diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc25b9f..873e5c5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1450,9 +1450,7 @@ config KEXEC_JUMP
 
 config PHYSICAL_START
 	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
-	default "0x1000000" if X86_NUMAQ
-	default "0x200000" if X86_64
-	default "0x100000"
+	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
 
@@ -1511,8 +1509,8 @@ config RELOCATABLE
 config PHYSICAL_ALIGN
 	hex
 	prompt "Alignment value to which kernel should be aligned" if X86_32
-	default "0x100000" if X86_32
-	default "0x200000" if X86_64
+	default "0x200000" if X86_64 || X86_PAE
+	default "0x400000"
 	range 0x2000 0x400000
 	---help---
 	  This value puts the alignment restrictions on physical address
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 3a8a866..a7a2837 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -39,11 +39,11 @@ ENTRY(startup_32)
 
 	cli
 	movl $(__BOOT_DS),%eax
-	movl %eax,%ds
-	movl %eax,%es
-	movl %eax,%fs
-	movl %eax,%gs
-	movl %eax,%ss
+	movl %eax, %ds
+	movl %eax, %es
+	movl %eax, %fs
+	movl %eax, %gs
+	movl %eax, %ss
 1:
 
 /* Calculate the delta between where we were compiled to run
@@ -64,12 +64,18 @@
  */
 
 #ifdef CONFIG_RELOCATABLE
-	movl %ebp, %ebx
-	addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
-	andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
+	movl $LOAD_PHYSICAL_ADDR, %eax
+	movl %ebp, %ebx
+	cmpl %ebx, %eax
+	jbe 1f
+	movl %eax, %ebx
+1:
+	addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
+	andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
 #else
 	movl $LOAD_PHYSICAL_ADDR, %ebx
 #endif
+	movl %ebx, %edi
 
 	/* Replace the compressed data size with the uncompressed size */
 	subl input_len(%ebp), %ebx
@@ -84,28 +90,29 @@
 	addl $4095, %ebx
 	andl $~4095, %ebx
 
+/*
+ * Setup the stack
+ */
+	leal boot_stack_end(%ebx), %esp
+	pushl %edi	/* Saved kernel target address */
+
 /* Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
 	pushl %esi
-	leal _end(%ebp), %esi
-	leal _end(%ebx), %edi
-	movl $(_end - startup_32), %ecx
+	leal (_bss-4)(%ebp), %esi
+	leal (_bss-4)(%ebx), %edi
+	movl $(_bss - startup_32), %ecx
+	shrl $2, %ecx
 	std
-	rep
-	movsb
+	rep; movsl
 	cld
 	popl %esi
 
-/* Compute the kernel start address.
+/*
+ * %ebp -> kernel target address
  */
-#ifdef CONFIG_RELOCATABLE
-	addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
-	andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
-#else
-	movl $LOAD_PHYSICAL_ADDR, %ebp
-#endif
-
+	popl %ebp
 /*
  * Jump to the relocated address.
  */
@@ -119,18 +126,12 @@ relocated:
 /*
  * Clear BSS
  */
-	xorl %eax,%eax
-	leal _edata(%ebx),%edi
-	leal _end(%ebx), %ecx
-	subl %edi,%ecx
-	cld
-	rep
-	stosb
-
-/*
- * Setup the stack for the decompressor
- */
-	leal boot_stack_end(%ebx), %esp
+	xorl %eax, %eax
+	leal _bss(%ebx), %edi
+	leal (_ebss+3)(%ebx), %ecx
+	subl %edi, %ecx
+	shrl $2, %ecx
+	rep; stosl
 /*
  * Do the decompression, and jump to the new kernel..
 */
@@ -178,14 +179,15 @@ relocated:
 /*
  * Jump to the decompressed kernel.
  */
-	xorl %ebx,%ebx
+	xorl %ebx, %ebx
 	jmp *%ebp
 
-.bss
 /* Stack and heap for uncompression */
-.balign 4
+	.bss
+	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
+	.section ".stack","aw"
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:
diff --git a/arch/x86/boot/compressed/vmlinux_32.lds b/arch/x86/boot/compressed/vmlinux_32.lds
index bb3c483..952dc63 100644
--- a/arch/x86/boot/compressed/vmlinux_32.lds
+++ b/arch/x86/boot/compressed/vmlinux_32.lds
@@ -33,11 +33,19 @@
 		*(.data.*)
 		_edata = . ;
 	}
+	. = ALIGN(4);
 	.bss : {
 		_bss = . ;
 		*(.bss)
 		*(.bss.*)
 		*(COMMON)
-		_end = . ;
+		_ebss = .;
 	}
+	. = ALIGN(4);
+	.stack : {
+		_stack = .;
+		*(.stack)
+		_estack = .;
+	}
+	_end = . ;
 }
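
Two details of the new CONFIG_RELOCATABLE path in startup_32 are easy to miss: the bootloader-supplied load address is first clamped so the kernel is never relocated below LOAD_PHYSICAL_ADDR (the cmpl/jbe pair), and only then rounded up to CONFIG_PHYSICAL_ALIGN. A minimal C sketch of that computation, using hypothetical stand-ins for the config-derived constants; the mask form assumes the alignment is a power of two:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's config-derived constants. */
#define PHYSICAL_ALIGN		0x400000u	/* CONFIG_PHYSICAL_ALIGN: 4 MiB */
#define LOAD_PHYSICAL_ADDR	0x1000000u	/* from CONFIG_PHYSICAL_START: 16 MiB */

/*
 * Same arithmetic as the CONFIG_RELOCATABLE branch: clamp the runtime
 * load address to LOAD_PHYSICAL_ADDR, then round up to the alignment
 * with (addr + align - 1) & ~(align - 1), valid for power-of-two align.
 */
static uint32_t relocation_target(uint32_t load_addr)
{
	uint32_t addr = load_addr;

	if (addr < LOAD_PHYSICAL_ADDR)	/* the cmpl %ebx, %eax; jbe 1f pair */
		addr = LOAD_PHYSICAL_ADDR;

	return (addr + PHYSICAL_ALIGN - 1) & ~(PHYSICAL_ALIGN - 1);
}

int main(void)
{
	printf("%#x\n", relocation_target(0x200000));	/* clamped up: 0x1000000 */
	printf("%#x\n", relocation_target(0x1000001));	/* rounded up: 0x1400000 */
	return 0;
}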
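
The copy loop now runs backward (std before "rep; movsl") because source and destination overlap whenever the load address falls inside the relocation buffer: starting at the last dword of [startup_32, _bss) and walking down, every dword is read before anything can overwrite it, and shrl $2 turns the byte count into a dword count. Roughly, in C; copy_backward_dwords is a made-up name, and len is assumed to be a multiple of 4, which the new ALIGN(4) in the linker script guarantees for _bss - startup_32:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

/*
 * C equivalent of "std; rep; movsl; cld": copy len bytes (a multiple
 * of 4) dword-wise from the highest address down, so an overlapping
 * move to a higher destination never reads already-clobbered data.
 */
static void copy_backward_dwords(void *dst, const void *src, size_t len)
{
	const uint32_t *s = (const uint32_t *)src + len / 4;
	uint32_t *d = (uint32_t *)dst + len / 4;

	while (d > (uint32_t *)dst)
		*--d = *--s;	/* decrement first, like DF=1 */
}

int main(void)
{
	/* Overlapping move inside one buffer, destination above source. */
	uint32_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	copy_backward_dwords(buf + 2, buf, 4 * sizeof(uint32_t));
	assert(buf[2] == 1 && buf[5] == 4);
	return 0;
}

This is the same direction choice memmove makes when the destination lies above the source.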
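
The BSS clear gets the same dword-at-a-time treatment: %ecx becomes (_ebss + 3 - _bss) >> 2, the byte range rounded up to whole dwords, which "rep; stosl" then fills with the zeroed %eax. Because the linker script now aligns _bss to 4, only the tail of the range can pick up padding bytes. A sketch, with clear_bss a hypothetical helper taking the would-be linker symbols as plain pointers:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

/*
 * C equivalent of the new BSS clear: round [bss, ebss) up to whole
 * dwords, as (_ebss + 3 - _bss) >> 2 does, then store zeros the way
 * "rep; stosl" does with %eax == 0.
 */
static void clear_bss(void *bss, void *ebss)
{
	uint32_t *p = bss;
	size_t dwords = ((uintptr_t)ebss + 3 - (uintptr_t)bss) >> 2;

	while (dwords--)
		*p++ = 0;
}

int main(void)
{
	uint32_t buf[4];	/* 4-byte aligned, like _bss after ALIGN(4) */

	memset(buf, 0xff, sizeof buf);
	clear_bss(buf, (uint8_t *)buf + 13);	/* 13 bytes round up to 16 */
	assert(buf[3] == 0);			/* tail dword cleared too */
	return 0;
}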