Before this patch we would only change the alignment of a section if its start and end addresses were not already aligned as required for precise capability bounds.
This meant that there was nothing stopping the alignment of this section from degrading in the future.  At first glance this looks like it would not be a problem, since this function only adjusts sections in order of increasing VMA (hence it would seem that the alignment of the current section cannot be reduced).
However, in some cases layout_sections_again can reduce the alignment of sections, e.g. if there was some initial space before the .text section which it shrinks for some reason.  This degraded the alignment of all sections after that point (until the next highly aligned section).
The testcase added for this change (in the final "testsuite" commit of this patch series) is a good example of this: on first entry to elfNN_c64_resize_sections, .text happened to have a start address of 0xb0 (which meant that .data.rel.ro was also aligned to such a boundary, so the function did not believe there was any need to align .data.rel.ro to a 16-byte boundary).  However, after the first call to layout_sections_again this changed to 0x78, reducing the alignment of .data.rel.ro in the process.
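To make the failure mode concrete, below is a small stand-alone sketch (this is not the BFD code; the section names, sizes and the 0xb0/0x78 start addresses come from the description above, while the layout routine and the chosen alignment powers are made up purely for illustration).  It models a layout pass that honours only each section's *recorded* alignment_power: because the 16-byte requirement of .data.rel.ro is never recorded, the pass that starts .text at 0x78 silently drops it.

  /* Toy model only -- not elfNN_c64_resize_sections.  Section sizes and the
     0xb0/0x78 start addresses are from the commit message; the rest is
     hypothetical.  */
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  struct sec
  {
    const char *name;
    uint64_t size;
    unsigned align_power;   /* log2 of the recorded alignment, as in BFD */
    uint64_t vma;
  };

  /* Place each section at the next address satisfying its recorded
     alignment only.  */
  static void
  layout (struct sec *secs, int n, uint64_t start)
  {
    uint64_t addr = start;
    for (int i = 0; i < n; i++)
      {
        uint64_t mask = ((uint64_t) 1 << secs[i].align_power) - 1;
        addr = (addr + mask) & ~mask;
        secs[i].vma = addr;
        addr += secs[i].size;
      }
  }

  int
  main (void)
  {
    /* .data.rel.ro keeps align_power 3: its 16-byte requirement was never
       recorded because the first pass happened to see an aligned address.  */
    struct sec secs[] = {
      { ".text",        0x100,  2, 0 },
      { ".data.rel.ro", 0x8000, 3, 0 },
    };

    layout (secs, 2, 0xb0);   /* first pass: .data.rel.ro lands at 0x1b0,
                                 which happens to be 16-byte aligned */
    printf ("pass 1: .data.rel.ro at 0x%" PRIx64 "\n", secs[1].vma);

    layout (secs, 2, 0x78);   /* relayout: .data.rel.ro drops to 0x178,
                                 only 8-byte aligned */
    printf ("pass 2: .data.rel.ro at 0x%" PRIx64 "\n", secs[1].vma);
    return 0;
  }

Recording the requirement on the section itself (bumping alignment_power whenever c64_valid_cap_range reports a higher need, not only when the current addresses happen to be wrong) is what the hunks below do, so a later layout_sections_again pass can no longer undo the alignment.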
############### Attachment also inlined for ease of reply ###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index f3f058934ea203cb735b9b4baf77ba8e8f854066..fb50608b72c41fae81b1d2b3a655161ce8c55e8d 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -4872,7 +4872,8 @@ record_section_change (asection *sec, struct sec_change_queue **queue)
   bfd_vma high = sec->vma + sec->size;
   unsigned alignment;
 
-  if (!c64_valid_cap_range (&low, &high, &alignment))
+  if (!c64_valid_cap_range (&low, &high, &alignment)
+      || sec->alignment_power < alignment)
     queue_section_padding (queue, sec);
 }
 
@@ -5037,9 +5038,6 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
 
       if (!c64_valid_cap_range (&low, &high, &align))
         {
-          if (queue->sec->alignment_power < align)
-            queue->sec->alignment_power = align;
-
           padding = high - low - queue->sec->size;
 
           if (queue->sec != pcc_high_sec)
@@ -5048,6 +5046,8 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
               padding = 0;
             }
         }
+      if (queue->sec->alignment_power < align)
+        queue->sec->alignment_power = align;
 
       /* If we have crossed all sections within the PCC range, set up
          alignment and padding for the PCC range.  */
@@ -5063,15 +5063,14 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
 
       if (!c64_valid_cap_range (&pcc_low, &pcc_high, &align))
         {
-          if (pcc_low_sec->alignment_power < align)
-            pcc_low_sec->alignment_power = align;
-
           bfd_vma current_length =
             (pcc_high_sec->vma + pcc_high_sec->size) - pcc_low_sec->vma;
           bfd_vma desired_length = (pcc_high - pcc_low);
           padding = desired_length - current_length;
           c64_pad_section (pcc_high_sec, padding);
         }
+      if (pcc_low_sec->alignment_power < align)
+        pcc_low_sec->alignment_power = align;
     }
 
   (*htab->layout_sections_again) ();
diff --git a/ld/testsuite/ld-aarch64/morello-sec-always-align.d b/ld/testsuite/ld-aarch64/morello-sec-always-align.d
new file mode 100644
index 0000000000000000000000000000000000000000..f46ebdfb821eac114f84dee8b385d1ab1648af30
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-sec-always-align.d
@@ -0,0 +1,20 @@
+#as: -march=morello+c64
+#ld: -static -T morello-sec-round-adjust.ld
+#objdump: -x
+#...
+Sections:
+Idx Name          Size      VMA               LMA        File off   Algn
+#...
+  0 .text         00000100  0000000000000080  [0-9a-f]+  [0-9a-f]+  2**4
+                  CONTENTS, ALLOC, LOAD, READONLY, CODE
+  1 .data.rel.ro  00008000  0000000000000180  [0-9a-f]+  [0-9a-f]+  2**4
+                  CONTENTS, ALLOC, LOAD, DATA
+  2 .got          00000030  0000000000008180  [0-9a-f]+  [0-9a-f]+  2**4
+                  CONTENTS, ALLOC, LOAD, DATA
+  3 .got.plt      00000030  00000000000081b0  [0-9a-f]+  [0-9a-f]+  2**4
+                  CONTENTS, ALLOC, LOAD, DATA
+  4 .rela.dyn     000000b8  00000000000081e0  [0-9a-f]+  [0-9a-f]+  2**3
+                  CONTENTS, ALLOC, LOAD, READONLY, DATA
+#pass
+
+
diff --git a/ld/testsuite/ld-aarch64/morello-sec-always-align.ld b/ld/testsuite/ld-aarch64/morello-sec-always-align.ld
new file mode 100644
index 0000000000000000000000000000000000000000..87613d0f3b3fa1321c26160b8cf421c541ba004d
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-sec-always-align.ld
@@ -0,0 +1,17 @@
+SECTIONS {
+  . = SIZEOF_HEADERS;
+  .text :
+  {
+    *(.text)
+  }
+  .data.rel.ro :
+  {
+    __data_rel_ro_startsym = .;
+    *(.data.rel.ro)
+  }
+  .got : { *(.got) }
+  .iplt : { *(.iplt) }
+  .data : { *(.data) }
+  .rela.dyn : { *(.rela.dyn) }
+  .interp : { *(.interp) }
+}
diff --git a/ld/testsuite/ld-aarch64/morello-sec-always-align.s b/ld/testsuite/ld-aarch64/morello-sec-always-align.s
new file mode 100644
index 0000000000000000000000000000000000000000..1a3992dd93363f6182cf34e34465069114b79cae
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-sec-always-align.s
@@ -0,0 +1,22 @@
+.text
+__start:
+	# Use a GOT relocation because the linker currently doesn't do the
+	# padding and alignment unless we have GOT relocations.  This is
+	# another linker bug that we need to fix.
+	adrp	c0, :got:__data_rel_ro_startsym
+	ret
+.zero 0x0f8
+.section .data.rel.ro,"aw",@progbits
+# We use a linker defined symbol that points into .data.rel.ro so that the
+# linker will need to ensure this section is aligned and padded so that its
+# bounds can be precisely represented by a capability.  This is done in the
+# linker script.
+#
+# Here we ensure that the size of the section is large enough that it will need
+# padding.
+.zero 0x8000
+.section .got,"aw",@progbits
+.capinit __data_rel_ro_startsym
+.xword 0
+.xword 0
+