This was newly failing because it was checking for a value *without* the
LSB set. In a recent commit we have fixed the bug which lost the LSB,
and that caused this test to fail.
Here we use the new testsuite implementation to test for "one plus the
location" rather than "one of the values A, B, C, ...", which is a
better representation of what we're trying to check.
############### Attachment also inlined for ease of reply
###############
diff --git a/ld/testsuite/ld-aarch64/c64-ifunc-2.d
b/ld/testsuite/ld-aarch64/c64-ifunc-2.d
index
b87908b27a4366a181ddee8b7e4b7343ebd3f4b7..2954f67b0c50ece30e2e7a002fc7e03f2afb9f4c
100644
--- a/ld/testsuite/ld-aarch64/c64-ifunc-2.d
+++ b/ld/testsuite/ld-aarch64/c64-ifunc-2.d
@@ -4,10 +4,17 @@
#objdump: -dw
#source: ifunc-2.s
+#record: INDIRECT_LOC FOO_LOCATION
#...
-0+(130|1a0|1c8|1e0) <foo>:
+0+([0-9a-f]{3}).*0x([0-9a-f]{3})@plt>:
#...
-[ \t0-9a-f]+:[ \t0-9a-f]+bl[ \t0-9a-f]+<\*ABS\*\+0x(130|1a0|1c8|1e0)@plt>
+Disassembly of section \.text:
+
+#check: FOO_LOC string tolower $FOO_LOCATION
+#check: INDIRECT_POS format %x [expr "0x$INDIRECT_LOC + 1"]
+0+FOO_LOC <foo>:
+#...
+[ \t0-9a-f]+:[ \t0-9a-f]+bl[ \t0-9a-f]+<\*ABS\*\+0xFOO_LOC@plt>
[ \t0-9a-f]+:[ \t0-9a-f]+adrp[ \t]+c0, 0 <.*>
-[ \t0-9a-f]+:[ \t0-9a-f]+add[ \t]+c0, c0, #0x(120|190|1b8|1d0)
+[ \t0-9a-f]+:[ \t0-9a-f]+add[ \t]+c0, c0, #0xINDIRECT_POS
#pass
Really don't like that we use hard-coded addresses. There are examples
in the existing testsuite that use options of hard-coded addresses, but
I want something more general that we can actually test the things we
need to test with.
Here we add an initial implementation to do such a thing.
This initial implementation has quite a lot of problems, but it adds
a lot in the fact that we can write testcases which should work across
different setups.
Hopefully we can work out the problems with use (or maybe identify that
the problems don't actually matter very much in practice) and eventually
upstream something better.
To document the problems:
- The implementation means that recording something actually puts that
into the regexp_diff namespace which could shadow existing variables.
- We don't have a way to say "the previous value", but always have to
write some TCL procedure to return that previous value.
############### Attachment also inlined for ease of reply
###############
diff --git a/binutils/testsuite/lib/binutils-common.exp
b/binutils/testsuite/lib/binutils-common.exp
index
b9a1e6e4bc0c8644a3273a8532088ed05eb4fcea..efa476ee7a215fd3fd20b5995d2f64bc4d03ddbf
100644
--- a/binutils/testsuite/lib/binutils-common.exp
+++ b/binutils/testsuite/lib/binutils-common.exp
@@ -363,6 +363,23 @@ proc check_relro_support { } {
# Optionally match REGEXP against line from FILE_1. If the REGEXP
# does not match then the next line from FILE_2 is tried.
#
+# #record: <names>
+# Sets names under which to record matched subexpressions in the regexp
+# on the next line. Use all uppercase variable names to avoid
+# interacting with local variables in the given function.
+# N.b. PREVMATCH is always set to the *entirety* of the previous match
+# (whether or not said match was directly after a #record line).
+#
+# #check: <name> <substitution>
+# Replaces any occurrence of <name> in the following regexp lines
with the
+# result of evaluating the string <substitution> in TCL. Often
used in
+# combination with #record to set variables for future use in the
+# <substitution> field.
+#
+# #clearcheck
+# Clears all extra substitutions added with #check for future regexp
+# lines.
+#
# Other # lines are comments. Regexp lines starting with the `!'
character
# specify inverse matching (use `\!' for literal matching against a
leading
# `!'). Skip empty lines in both files.
@@ -383,6 +400,12 @@ proc regexp_diff { file_1 file_2 args } {
set diff_pass 0
set fail_if_match 0
set ref_subst ""
+
+ set PREVMATCH ""
+ set extra_vars ""
+ # set STRPOS "uninitialised"
+ set extra_subst ""
+
if { [llength $args] > 0 } {
set ref_subst [lindex $args 0]
}
@@ -440,14 +463,19 @@ proc regexp_diff { file_1 file_2 args } {
foreach {name value} $ref_subst {
regsub -- $name $line_bx $value line_bx
}
+ foreach {name value} $extra_subst {
+ set value [expr $value];
+ regsub -- $name $line_bx $value line_bx
+ }
verbose "looking for $n\"^$line_bx$\"" 3
- while { [expr [regexp "^$line_bx$" "$line_a"] == $negated] } {
+ while { [expr [regexp "^$line_bx$" "$line_a" PREVMATCH
{*}$extra_vars] == $negated] } {
verbose "skipping \"$line_a\"" 3
if { [gets $file_a line_a] == $eof } {
set end_1 1
break
}
}
+ set extra_vars ""
break
} elseif { [string match "#\\?*" $line_b] } {
if { ! $end_1 } {
@@ -459,17 +487,46 @@ proc regexp_diff { file_1 file_2 args } {
foreach {name value} $ref_subst {
regsub -- $name $line_bx $value line_bx
}
+ foreach {name value} $extra_subst {
+ set value [expr $value];
+ regsub -- $name $line_bx $value line_bx
+ }
verbose "optional match for $n\"^$line_bx$\"" 3
- if { [expr [regexp "^$line_bx$" "$line_a"] != $negated] } {
+ if { [expr [regexp "^$line_bx$" "$line_a" PREVMATCH
{*}$extra_vars] != $negated] } {
+ # Choice here between having #?<regexp> *always* clear
+ # the extra_vars, or only clear the extra_vars if it
+ # actually matched. Right now have no use case for
+ # extra_vars combined with the #?<regexp> pattern.
+ # Currently choosing to have this only clear the
+ # extra_vars if the line actually matched, but could
+ # happily change later on if needs be.
+ set extra_vars ""
break
}
}
+ } elseif { [string match "#record: *" $line_b] } {
+ if { ! $end_1 } {
+ set extra_vars [concat [string range $line_b 9 end]]
+ }
+ } elseif { [string match "#clearcheck*" $line_b] } {
+ if { ! $end_1 } {
+ set extra_subst ""
+ }
+ } elseif { [string match "#check: *" $line_b] } {
+ if { ! $end_1 } {
+ set value [lindex [regexp -inline "#check: (\\S+).*" $line_b] 1]
+ lappend extra_subst $value
+ lappend extra_subst [string range $line_b [expr 8+[string length
$value]] end]
+ }
+ # send_user "extra_subst is now: $extra_subst\n"
}
if { [gets $file_b line_b] == $eof } {
set end_2 1
break
}
}
+ # send_user "STRPOS is $STRPOS\n"
+ # send_user "$line_b\n"
if { $diff_pass } {
break
@@ -494,13 +551,21 @@ proc regexp_diff { file_1 file_2 args } {
foreach {name value} $ref_subst {
regsub -- $name $line_bx $value line_bx
}
+ foreach {name value} $extra_subst {
+ set value [eval $value];
+ # send_user "match: $name\n"
+ # send_user "replacement: $value\n"
+ regsub -- $name $line_bx $value line_bx
+ }
+ # send_user "checking against $line_bx\n"
verbose "regexp $n\"^$line_bx$\"\nline \"$line_a\"" 3
- if { [expr [regexp "^$line_bx$" "$line_a"] == $negated] } {
+ if { [expr [regexp "^$line_bx$" "$line_a" PREVMATCH
{*}$extra_vars] == $negated] } {
send_log "regexp_diff match failure\n"
send_log "regexp $n\"^$line_bx$\"\nline $s\"$line_a\"\n"
verbose "regexp_diff match failure\n" 3
set differences 1
}
+ set extra_vars ""
}
}
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-2.d
b/ld/testsuite/ld-aarch64/emit-relocs-morello-2.d
index
fe59dee85f7bbbc8a11ca36168068ae3dfbd1564..c5eebec4e1af2e1d6003138da91f912c0db6ac60
100644
--- a/ld/testsuite/ld-aarch64/emit-relocs-morello-2.d
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-2.d
@@ -23,7 +23,8 @@ Disassembly of section .got:
Disassembly of section .data:
-0000000000010360 <str>:
+#record: STRPOS
+(0000000000010360|0000000000010380) <str>:
.*: 6c6c6548 .*
.*: 6874206f .*
.*: 20657265 .*
@@ -34,11 +35,12 @@ Disassembly of section .data:
.*: R_AARCH64_RELATIVE \*ABS\*\+.*
.* <ptr>:
-.*: 00010360 .*
+#check: SHORTSTR string range $STRPOS end-7 end
+.*: SHORTSTR .*
...
.* <cap>:
-.*: 00010360 .*
+.*: SHORTSTR .*
.*: R_MORELLO_RELATIVE \*ABS\*
.*: 00000000 .*
.*: 0000001b .*
There is special handling to ensure that symbols which look like they
are supposed to point at the start of a section are given a size to span
that entire section.
GNU ld has special `start_stop` symbols which are automatically provided
by the linker for sections where the output section and input section
share a name and that name is representable as a C identifier.
(see commit cbd0eecf2)
These special symbols represent the start and end address of the output
section. These special symbols are used in much the same way in source
code as section-start symbols provided by the linker script. Glibc uses
these for the __libc_atexit section containing pointers for functions to
run at exit.
This change accounts for these `start_stop` symbols by giving them the
size of the "remaining" range of the output section in the same way as
linker script defined symbols. This means that the `start` symbols get
section-spanning bounds and the `stop` symbols get bounds of zero.
N.b. We will have to also account for these symbols in the
`resize_sections` function, but that's not done yet.
############### Attachment also inlined for ease of reply
###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index
a33aea0eab02ac97cb605cac677d8ac27a475967..9a9bd46f4579d6ad1ec0d2a7c8df3b7d7ba1000a
100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -6456,6 +6456,15 @@ c64_symbol_section_adjustment (struct
elf_link_hash_entry *h, bfd_vma value,
}
return C64_SYM_LDSCRIPT_DEF;
}
+
+ if (h->start_stop)
+ {
+ asection *s = h->u2.start_stop_section->output_section;
+ BFD_ASSERT (s != NULL);
+ *ret_sec = s;
+ return C64_SYM_LDSCRIPT_DEF;
+ }
+
return C64_SYM_STANDARD;
}
diff --git a/ld/testsuite/ld-aarch64/aarch64-elf.exp
b/ld/testsuite/ld-aarch64/aarch64-elf.exp
index
49b2e70adca947b61294bf1d589ce366b81da569..f0d2048efc37c2df3f4dfea304c7f93f8fe3a169
100644
--- a/ld/testsuite/ld-aarch64/aarch64-elf.exp
+++ b/ld/testsuite/ld-aarch64/aarch64-elf.exp
@@ -271,6 +271,7 @@ run_dump_test_lp64 "morello-sec-round-include-relro"
run_dump_test_lp64 "morello-pcc-bounds-include-readonly"
run_dump_test_lp64 "morello-sec-round-choose-linker-syms"
run_dump_test_lp64 "morello-entry-point"
+run_dump_test_lp64 "morello-sec-start_stop-round"
run_dump_test_lp64 "morello-tlsdesc"
run_dump_test_lp64 "morello-tlsdesc-static"
run_dump_test_lp64 "morello-tlsdesc-staticpie"
diff --git a/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.d
b/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.d
new file mode 100644
index
0000000000000000000000000000000000000000..3987696e5ab864a4ef1f57016eb9a87f2849f8a2
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.d
@@ -0,0 +1,26 @@
+#as: -march=morello+c64
+#ld: -static
+#objdump: -d -j .data -j __libc_atexit
+
+.*: file format .*
+
+
+Disassembly of section \.data:
+
+[0-9a-f]+ <__data_start>:
+#record: START_LIBC_ADDR
+.*: ([0-9a-f]+) .*
+.*: 00000000 .*
+.*: 00000008 .*
+.*: 02000000 .*
+
+Disassembly of section __libc_atexit:
+
+# Use `string tolower` because we know we only have a number so it
won't change
+# anything. That's needed because the current record/check implementation
+# doesn't have a way to define a replacement which is just the existing
+# variable.
+#check: START_LIBC string tolower $START_LIBC_ADDR
+00000000START_LIBC <__start___libc_atexit>:
+.*: 0000002a .*
+.*: 00000000 .*
diff --git a/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.s
b/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.s
new file mode 100644
index
0000000000000000000000000000000000000000..b89273e82460c05cadfd2e3365be671411558e93
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-sec-start_stop-round.s
@@ -0,0 +1,10 @@
+.section __libc_atexit,"aw"
+ .xword 42
+.data
+atexit_location:
+ .chericap __start___libc_atexit
+.text
+.globl _start
+.type _start STT_FUNC
+_start:
+ add c0, c0, :lo12:atexit_location
The LSB on STT_FUNC symbols was missed in a few different places.
1) Absolute relocations coming from .xword, .word, and .hword
directives and the lowest bit MOVW relocations did not account for
the LSB at all.
2) Relocations for the ADR instruction only added the LSB on local
symbols.
Here we account for these by adding the LSB in each clause in
elfNN_aarch64_final_link_relocate.
The change under the BFD_RELOC_AARCH64_NN clause handles absolute 64 bit
relocations, the change for BFD_RELOC_AARCH64_ADR_LO21_PCREL handles the
relocation on ADR instructions, and the extra relocations checked
against in the clause including BFD_RELOC_AARCH64_ADD_LO12 are the
remaining items.
N.b. we noticed the MOVW relocation problem because glibc's start.S was
using these direct MOV relocations to access the value of `main`. Since
`main` is a function we need to include the LSB in the resulting
relocation value. These relocations did not include the LSB from
STT_FUNC symbols.
Others were found from inspection of each relocation in turn.
############### Attachment also inlined for ease of reply
###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index
cb9e5f132cca2d360b8bd2cf2e2eb1fbbf1695f0..a33aea0eab02ac97cb605cac677d8ac27a475967
100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -6900,6 +6900,11 @@ elfNN_aarch64_final_link_relocate
(reloc_howto_type *howto,
return bfd_reloc_ok;
case BFD_RELOC_AARCH64_NN:
+ /* If we are relocating against a C64 symbol, then the value can't
+ already have the LSB set (since STT_FUNC symbols are code labels and
+ they will be aligned). Hence it's safe just to or-equal in order
+ to ensure the LSB is set in that case. */
+ value |= to_c64 ? 1 : 0;
/* When generating a shared object or relocatable executable, these
relocations are copied into the output file to be resolved at
@@ -7115,8 +7120,7 @@ elfNN_aarch64_final_link_relocate
(reloc_howto_type *howto,
signed_addend,
weak_undef_p);
- if (bfd_r_type == BFD_RELOC_AARCH64_ADR_LO21_PCREL && isym != NULL
- && isym->st_target_internal & ST_BRANCH_TO_C64)
+ if (bfd_r_type == BFD_RELOC_AARCH64_ADR_LO21_PCREL && to_c64)
value |= 1;
break;
@@ -7172,8 +7176,13 @@ elfNN_aarch64_final_link_relocate
(reloc_howto_type *howto,
value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
place, value,
signed_addend, weak_undef_p);
- if (bfd_r_type == BFD_RELOC_AARCH64_ADD_LO12 && isym != NULL
- && isym->st_target_internal & ST_BRANCH_TO_C64)
+ if ((bfd_r_type == BFD_RELOC_AARCH64_ADD_LO12
+ || bfd_r_type == BFD_RELOC_AARCH64_MOVW_G0
+ || bfd_r_type == BFD_RELOC_AARCH64_MOVW_G0_S
+ || bfd_r_type == BFD_RELOC_AARCH64_MOVW_G0_NC
+ || bfd_r_type == BFD_RELOC_AARCH64_32
+ || bfd_r_type == BFD_RELOC_AARCH64_16)
+ && to_c64)
value |= 1;
break;
diff --git a/ld/testsuite/ld-aarch64/aarch64-elf.exp
b/ld/testsuite/ld-aarch64/aarch64-elf.exp
index
9352db42e9913021d17a8e5dd8d7d8cf2125fe29..49b2e70adca947b61294bf1d589ce366b81da569
100644
--- a/ld/testsuite/ld-aarch64/aarch64-elf.exp
+++ b/ld/testsuite/ld-aarch64/aarch64-elf.exp
@@ -250,6 +250,7 @@ run_dump_test_lp64 "emit-relocs-morello-6"
run_dump_test_lp64 "emit-relocs-morello-6b"
run_dump_test_lp64 "emit-relocs-morello-7"
run_dump_test_lp64 "emit-relocs-morello-8"
+run_dump_test_lp64 "emit-relocs-morello-9"
run_dump_test_lp64 "emit-morello-reloc-markers-1"
run_dump_test_lp64 "emit-morello-reloc-markers-2"
run_dump_test_lp64 "emit-morello-reloc-markers-3"
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-9.d
b/ld/testsuite/ld-aarch64/emit-relocs-morello-9.d
new file mode 100644
index
0000000000000000000000000000000000000000..a9e1c3f37485df9d0ea9f2a52edab97cc9edf355
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-9.d
@@ -0,0 +1,33 @@
+#source: emit-relocs-morello-9.s
+#as: -march=morello+c64
+#ld: -static -Ttext-segment 0x0
+#objdump: -d -j .data -j .text
+
+.*: file format .*
+
+
+Disassembly of section \.text:
+
+0000000000000000 <_start>:
+ 0: f2800020 movk x0, #0x1
+ 4: f2800020 movk x0, #0x1
+ 8: 30ffffc0 adr c0, 1 <_start\+0x1>
+ c: 30ffffa0 adr c0, 1 <_start\+0x1>
+ 10: 02000400 add c0, c0, #0x1
+ 14: 02000400 add c0, c0, #0x1
+ 18: d2800020 mov x0, #0x1 // #1
+ 1c: d2800020 mov x0, #0x1 // #1
+ 20: f2800020 movk x0, #0x1
+ 24: f2800020 movk x0, #0x1
+
+Disassembly of section \.data:
+
+.* <val>:
+ .*: 00000001 .word 0x00000001
+ .*: 00000001 .word 0x00000001
+ .*: 00000001 .word 0x00000001
+ .*: 00000000 .word 0x00000000
+ .*: 00000001 .word 0x00000001
+ .*: 00000001 .word 0x00000001
+ .*: 00000001 .word 0x00000001
+ .*: 00000000 .word 0x00000000
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-9.s
b/ld/testsuite/ld-aarch64/emit-relocs-morello-9.s
new file mode 100644
index
0000000000000000000000000000000000000000..854482cd027dd853aecd41465c1b2238bbbedb7d
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-9.s
@@ -0,0 +1,42 @@
+# Attempting to check that the LSB is set on all relocations to a function
+# symbol.
+#
+# This should only happen for those relocations which load an address
into a
+# register, since relocations that jump to a PC relative address like `bl`
+# should not include the LSB.
+.text
+.global _start
+.type _start,@function
+.type otherstart,@function
+_start:
+otherstart:
+ movk x0, #:abs_g0_nc:_start
+ movk x0, #:abs_g0_nc:otherstart
+ adr c0, _start
+ adr c0, otherstart
+ add c0, c0, :lo12:_start
+ add c0, c0, :lo12:otherstart
+ # The below are not as much of a worry if they go wrong since they
+ # check overflow, and the likelihood of there being a function which
+ # fits in the lowest 16 bits of an address is low. However, we can
+ # still test it in our testsuite with arguments to the linker, so we
+ # still get to check this edge case.
+ movz x0, #:abs_g0_s:_start
+ movz x0, #:abs_g0_s:otherstart
+ movk x0, #:abs_g0:_start
+ movk x0, #:abs_g0:otherstart
+.data
+.align 4
+.global val
+val:
+ # LSB should be included in the value of function symbols even if they
+ # are just added via absolute relocations.
+ .hword _start
+ .hword 0
+ .word _start
+ .xword _start
+ .hword otherstart
+ .hword 0
+ .word otherstart
+ .xword otherstart
+ .size val, .-val
The previous code was not actually using the size of a symbol when the
symbol was in the hash table. This meant that our TLS relaxations
created an instruction sequence with bounds of zero so that the GCC TLS
instruction sequence eventually ended up giving a length-zero
capability.
Also handle extra size of pointers in TCB for c64. For purecap we have
16 byte pointers. Hence the TCB is 32 bytes. This was not yet handled
in our relaxations.
Here we determine whether to use a 32 or 16 byte TCB based on the flags
of the current BFD (i.e. whether this is a purecap binary that we're
creating).
Testcases are updated to account for the fact that the length
of the capability to the symbol itself is now sometimes non-zero and for
the different offset required into the TLS block for modules loaded at
startup time.
############### Attachment also inlined for ease of reply
###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index
1f6d83041eecf01f87494b0699e59647ddc07293..cb9e5f132cca2d360b8bd2cf2e2eb1fbbf1695f0
100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -2929,7 +2929,8 @@ c64_value_p (asection *section, unsigned int value)
}
/* The size of the thread control block which is defined to be two
pointers. */
-#define TCB_SIZE (ARCH_SIZE/8)*2
+#define TCB_SIZE(cur_bfd) \
+ elf_elfheader(cur_bfd)->e_flags & EF_AARCH64_CHERI_PURECAP ? 32 :
(ARCH_SIZE/8)*2
struct elf_aarch64_local_symbol
{
@@ -6043,7 +6044,7 @@ tpoff_base (struct bfd_link_info *info)
/* If tls_sec is NULL, we should have signalled an error already. */
BFD_ASSERT (htab->tls_sec != NULL);
- bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
+ bfd_vma base = align_power ((bfd_vma) TCB_SIZE (info->output_bfd),
htab->tls_sec->alignment_power);
return htab->tls_sec->vma - base;
}
@@ -7700,7 +7701,7 @@ elfNN_aarch64_tls_relax (bfd *input_bfd, struct
bfd_link_info *info,
BFD_ASSERT (globals && input_bfd && contents && rel);
- if (is_local)
+ if (is_local || !bfd_link_pic (info))
{
if (h != NULL)
sym_size = h->size;
@@ -8106,7 +8107,7 @@ set_nop:
/* No need of CALL26 relocation for tls_get_addr. */
rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
- bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
+ bfd_putl32 (add_R0_R0 | (TCB_SIZE (input_bfd) << 10),
contents + rel->r_offset + 4);
return bfd_reloc_ok;
}
@@ -8135,7 +8136,7 @@ set_nop:
BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
/* No need of CALL26 relocation for tls_get_addr. */
rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
- bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
+ bfd_putl32 (add_R0_R0 | (TCB_SIZE (input_bfd) << 10),
contents + rel->r_offset + 0);
bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
return bfd_reloc_ok;
diff --git a/ld/testsuite/ld-aarch64/morello-tlsdesc-static.d
b/ld/testsuite/ld-aarch64/morello-tlsdesc-static.d
index
372f369e7a23625238a14aa2505f5a1de7d286ed..9026f14115b996ea90628325eb51e1773f08828e
100644
--- a/ld/testsuite/ld-aarch64/morello-tlsdesc-static.d
+++ b/ld/testsuite/ld-aarch64/morello-tlsdesc-static.d
@@ -14,8 +14,8 @@ Disassembly of section .text:
.*: c29bd042 mrs c2, ctpidr_el0
.*: d2a00001 movz x1, #0x0, lsl #16
.*: d2a00000 movz x0, #0x0, lsl #16
-.*: f2800200 movk x0, #0x10
-.*: f2800001 movk x1, #0x0
+.*: f2800400 movk x0, #0x20
+.*: f2800081 movk x1, #0x4
.*: c2a06040 add c0, c2, x0, uxtx
.*: c2c10000 scbnds c0, c0, x1
@@ -23,7 +23,7 @@ Disassembly of section .text:
.*: c29bd042 mrs c2, ctpidr_el0
.*: d2a00001 movz x1, #0x0, lsl #16
.*: d2a00000 movz x0, #0x0, lsl #16
-.*: f2800280 movk x0, #0x14
+.*: f2800480 movk x0, #0x24
.*: f2800281 movk x1, #0x14
.*: c2a06040 add c0, c2, x0, uxtx
.*: c2c10000 scbnds c0, c0, x1
diff --git a/ld/testsuite/ld-aarch64/morello-tlsdesc-staticpie.d
b/ld/testsuite/ld-aarch64/morello-tlsdesc-staticpie.d
index
e391d86962c1eb6c4b79aead7d6d67f817e970e1..bf2b67aa4574db66373a1932acb572609a793d69
100644
--- a/ld/testsuite/ld-aarch64/morello-tlsdesc-staticpie.d
+++ b/ld/testsuite/ld-aarch64/morello-tlsdesc-staticpie.d
@@ -42,7 +42,7 @@ Disassembly of section .text:
.*: c29bd042 mrs c2, ctpidr_el0
.*: d2a00001 movz x1, #0x0, lsl #16
.*: d2a00000 movz x0, #0x0, lsl #16
-.*: f2800280 movk x0, #0x14
+.*: f2800480 movk x0, #0x24
.*: f2800281 movk x1, #0x14
.*: c2....40 add c0, c2, x0, uxtx
.*: c2c10000 scbnds c0, c0, x1
The handling is done by putting the value that we want in a buffer and
using that as the entry_symbol.name which lang_end picks up.
Another option would be to find the entry symbol *after* lang_end has
finished (e.g. in elfNN_aarch64_init_file_header) and add the LSB to it
if that symbol is a C64 symbol.
This approach was mainly chosen in order to match more closely what
Thumb has done.
N.b. we set the LSB based on the LSB of the entry point symbol.
If the entry point symbol is in c64 code but is not an STT_FUNC (e.g.
it is an STT_NOTYPE) then the LSB will not be set.
This matches Morello clang behaviour.
############### Attachment also inlined for ease of reply
###############
diff --git a/ld/emultempl/aarch64elf.em b/ld/emultempl/aarch64elf.em
index
8a123106e3df3a0236cf818051430b5ef27eca8e..11512a127db039066f6c11e6132f7089d0528994
100644
--- a/ld/emultempl/aarch64elf.em
+++ b/ld/emultempl/aarch64elf.em
@@ -330,6 +330,43 @@ gld${EMULATION_NAME}_finish (void)
}
finish_default ();
+
+ struct bfd_link_hash_entry * h;
+ struct elf_link_hash_entry * eh;
+
+ if (!entry_symbol.name)
+ return;
+
+ h = bfd_link_hash_lookup (link_info.hash, entry_symbol.name,
+ FALSE, FALSE, TRUE);
+ eh = (struct elf_link_hash_entry *)h;
+ if (!h || !(eh->target_internal & ST_BRANCH_TO_C64))
+ return;
+ if (h->type != bfd_link_hash_defined
+ && h->type != bfd_link_hash_defweak)
+ return;
+ if (h->u.def.section->output_section == NULL)
+ return;
+
+ static char buffer[67];
+ bfd_vma val;
+
+ /* Special processing is required for a C64 entry symbol. The
+ bottom bit of its address must be set. */
+ val = (h->u.def.value
+ + bfd_section_vma (h->u.def.section->output_section)
+ + h->u.def.section->output_offset);
+
+ val |= 1;
+
+ /* Now convert this value into a string and store it in entry_symbol
+ where the lang_end() function will pick it up. */
+ buffer[0] = '0';
+ buffer[1] = 'x';
+
+ sprintf_vma (buffer + 2, val);
+
+ entry_symbol.name = buffer;
}
/* This is a convenient point to tell BFD about target specific flags.
diff --git a/ld/testsuite/ld-aarch64/aarch64-elf.exp
b/ld/testsuite/ld-aarch64/aarch64-elf.exp
index
721d16e09bc1392fbc5e7920a080962bb4b374a2..9352db42e9913021d17a8e5dd8d7d8cf2125fe29
100644
--- a/ld/testsuite/ld-aarch64/aarch64-elf.exp
+++ b/ld/testsuite/ld-aarch64/aarch64-elf.exp
@@ -269,6 +269,7 @@ run_dump_test_lp64 "morello-sec-round-data-only"
run_dump_test_lp64 "morello-sec-round-include-relro"
run_dump_test_lp64 "morello-pcc-bounds-include-readonly"
run_dump_test_lp64 "morello-sec-round-choose-linker-syms"
+run_dump_test_lp64 "morello-entry-point"
run_dump_test_lp64 "morello-tlsdesc"
run_dump_test_lp64 "morello-tlsdesc-static"
run_dump_test_lp64 "morello-tlsdesc-staticpie"
diff --git a/ld/testsuite/ld-aarch64/morello-entry-point.d
b/ld/testsuite/ld-aarch64/morello-entry-point.d
new file mode 100644
index
0000000000000000000000000000000000000000..29fb431199b977f1714da9d792f9da79e182339a
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/morello-entry-point.d
@@ -0,0 +1,10 @@
+# Checking that the entry point address of a binary with a c64 function
symbol
+# as the entry address is odd (i.e. has the LSB set).
+#source: emit-relocs-morello-2.s
+#as: -march=morello+c64
+#ld: -static
+#readelf: --file-header
+
+#...
+ Entry point address: 0x.*[13579]
+#pass
This patch series re-works the function elfNN_c64_resize_sections.
The function is called early on in linking to ensure that the range of sections
which need to be reachable via the PCC can be precisely bounded by the CHERI
capability format.
It also handles padding and aligning individual sections if there are any
relocations to symbols which will be given a size spanning the entire section
(i.e. section start linker defined symbols).
We hit a few problems in this function trying to link glibc, and discovered
some more while working on the function.
This patch series is a combination of the changes we've applied. I plan to
apply it as a series of patches rather than as one large patch to keep the
mapping between change and rationale.
Overall, the changes are about two things:
The first is making these padding and alignment adjustments to sections in such
a way that they are robust against changes earlier on in the linker script
instead of relying on making the adjustments in section-VMA order and having no
changes earlier on in the linker script after any adjustment has been made.
The fact that the PCC bounds span multiple sections means that we can't rely on
making adjustments in section-VMA order (increasing the alignment of the first
section in the PCC bounds will mean a change earlier on than any section in the
PCC bounds, while changes on the sections inside these bounds can adjust the
alignment needed on the PCC).
The second set of changes are around performing the padding and alignment
changes in more valid cases. We notice that data-only PURECAP shared libraries
(i.e. objects without C64 code) could still have relocations in them pointing
to section-start symbols (though won't need a non-existent PCC bounds to be
precise), and we fix a bug that did not account for section starting symbols
defined in the section they are supposed to mark.
A summary of the patch series:
1) Add padding by setting "dot" to the *expression* "dot + padding required"
rather than the value of "dot + padding required".
This means that adjustments to alignment of sections before the one being
padded cause changes which get propagated past this section.
We also include the padding in the section size, which trades unnecessary
zeroed data in the final binary against having the precisely bounded size
clear in the final binary.
2) Set the alignment of sections to what is needed for precise bounds even if
the existing VMA happens to be aligned enough. This means that if
earlier sections are adjusted then the linker will ensure this section
stays aligned correctly.
3) Ensure that PCC bounds are precise independent of whether there are any
section-start symbols requiring specific sections to be precisely bounded.
(bugfix).
4) Pad and align sections for data-only executables if they are made from
PURECAP objects and need it due to their relocations.
5) Refactor the function based on above changes so that rather than construct
a list of adjustments to make and sorting that list before applying the
adjustments, we simply make adjustments as we find the requirement
(possible because the adjustments are now robust against adjustments on
earlier sections).
6) Testsuite updates and new tests.
7) Adjust section padding on a section if there is a section start symbol *in
that section* (as well as just before it). I.e. if there is a
__data_relro_start symbol in the .data.rel.ro section then we adjust the
.data.rel.ro section (rather than checking to see if the next section
has the same start address as the end address of the current section and
padding the next section if that's the case).
This is a little tricky since we don't know the position of a symbol within
a section at this point, but like the existing code we handle that lack of
knowledge by overfitting and adjusting any section that may be relevant.
The entire patch series has been regression tested on AArch64 bare-metal
purecap with no regressions.
Thanks,
Matthew
Entire patch series attached to cover letter.
Before this they would span sections which are SEC_CODE or some specific
known sections like the GOT and PLT.
This is not enough, since the compiler can want to access .rodata via
relative offsets to PCC. Hence we need to include READONLY sections.
Similarly, we want to include .data.rel.ro sections in the PCC bounds so
that they can be accessed via PCC -- this allows the capability
indirection table to be accessed.
We have not been noticing this until now because the default linker
script happens to order sections such that the PCC being required to
span .got and .text happens to end up including these problematic
sections.
RELRO sections are a bit interesting since the fact they are RELRO is
not recorded anywhere on the section itself. Rather it is stored in the
fact that the section is covered by the RELRO segment.
This means that we need to check if the section's VMA is within the
relevant range rather than just look at the section. This turns out to
be pretty easy since we have a structure containing the RELRO range,
however we do need to ensure that we don't mix up the uses of the
section VMA and the RELRO start and end around calls of
layout_sections_again since this call can change both.
############### Attachment also inlined for ease of reply ###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index c44e7aa458e2d28f2fefe1ec754482923c448d69..f33388629164b59e07cdbac7bb4bce82e2f62b8b 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -4997,12 +4997,16 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
#define NOT_OP_SECTION(s) ((s) == NULL || (s)->output_section != sec)
- if ((sec->flags & SEC_CODE) == 0
+ if ((sec->flags & SEC_READONLY) == 0
&& NOT_OP_SECTION (htab->root.sgotplt)
&& NOT_OP_SECTION (htab->root.igotplt)
&& NOT_OP_SECTION (htab->root.sgot)
&& NOT_OP_SECTION (htab->root.splt)
- && NOT_OP_SECTION (htab->root.iplt))
+ && NOT_OP_SECTION (htab->root.iplt)
+ && (sec->vma < info->relro_start
+ || sec->vma >= info->relro_end))
+ continue;
+ if ((sec->flags & SEC_ALLOC) == 0)
continue;
if (sec->vma < low)
We were specifying section alignment requirements based on the alignment
that the section base happened to have. This sometimes resulted in very
strange alignment requests that were much greater than actually
required.
That is not usually a problem, but it does give unnecessary padding upon
re-adjustments due to changing the PCC bounds after individual sections
have been padded.
This patch adds an interface such that we return the alignment actually
required for exact capability bounds from c64_valid_cap_range. We then
use that alignment as our alignment requirement on the sections which
have a section-sized symbol associated with them.
############### Attachment also inlined for ease of reply ###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 76ab691b33e459a4ca80d337bdc70a1c53d6c1e0..c44e7aa458e2d28f2fefe1ec754482923c448d69 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -4801,11 +4801,12 @@ exponent (uint64_t len)
#define ALIGN_UP(x, a) (((x) + ONES (a)) & (~ONES (a)))
static bfd_boolean
-c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp)
+c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp, unsigned *alignmentp)
{
bfd_vma base = *basep, size = *limitp - *basep;
unsigned e, old_e;
+ *alignmentp = 1;
if ((e = exponent (size)) == (unsigned) -1)
return TRUE;
@@ -4818,6 +4819,7 @@ c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp)
base = ALIGN_UP (base, e + 3);
+ *alignmentp = e+3;
if (base == *basep && *limitp == base + size)
return TRUE;
@@ -4868,17 +4870,18 @@ record_section_change (asection *sec, struct sec_change_queue **queue)
{
bfd_vma low = sec->vma;
bfd_vma high = sec->vma + sec->size;
+ unsigned alignment;
- if (!c64_valid_cap_range (&low, &high))
+ if (!c64_valid_cap_range (&low, &high, &alignment))
queue_section_padding (queue, sec);
}
/* Make sure that all capabilities that refer to sections have bounds that
won't overlap with neighbouring sections. This is needed in two specific
cases. The first case is that of PCC, which needs to span across all
- executable sections as well as the GOT and PLT sections in the output
- binary. The second case is that of linker and ldscript defined symbols that
- indicate start and/or end of sections.
+ readonly sections as well as the GOT and PLT sections in the output binary.
+ The second case is that of linker and ldscript defined symbols that indicate
+ start and/or end of sections and/or zero-sized symbols.
In both cases, overlap of capability bounds are avoided by aligning the base
of the section and if necessary, adding a pad at the end of the section so
@@ -4963,7 +4966,6 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
if (len > 8 && name[0] == '_' && name[1] == '_'
&& (!strncmp (name + 2, "start_", 6)
|| !strcmp (name + len - 6, "_start")))
-
{
bfd_vma value = os->vma + os->size;
@@ -5029,10 +5031,8 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
low = queue->sec->vma;
high = queue->sec->vma + queue->sec->size;
- if (!c64_valid_cap_range (&low, &high))
+ if (!c64_valid_cap_range (&low, &high, &align))
{
- align = __builtin_ctzl (low);
-
if (queue->sec->alignment_power < align)
queue->sec->alignment_power = align;
@@ -5057,9 +5057,8 @@ elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
pcc_low = pcc_low_sec->vma;
pcc_high = pcc_high_sec->vma + pcc_high_sec->size + padding;
- if (!c64_valid_cap_range (&pcc_low, &pcc_high))
+ if (!c64_valid_cap_range (&pcc_low, &pcc_high, &align))
{
- align = __builtin_ctzl (pcc_low);
if (pcc_low_sec->alignment_power < align)
pcc_low_sec->alignment_power = align;
@@ -6533,8 +6532,9 @@ c64_fixup_frag (bfd *input_bfd, struct bfd_link_info *info,
return bfd_reloc_outofrange;
bfd_vma base = value, limit = value + size;
+ unsigned align = 0;
- if (!bounds_ok && !c64_valid_cap_range (&base, &limit))
+ if (!bounds_ok && !c64_valid_cap_range (&base, &limit, &align))
{
/* Just warn about this. It's not a requirement that bounds on
objects should be precise, so there's no reason to error out on
The permissions that a capability to an object should end up with are
based on the section it should point into. With symbols that point into
SHN_ABS sections we have nothing to base the permissions on (since these
sections don't have associated permission flags).
For the moment we default to Read-Write permissions and warn the user
about it. These permissions match what Morello LLD currently does (from
observation).
When Morello linkers use the symbol type to determine whether a
capability should have executable permissions or not, this should end up
being able to handle all uses (since STT_FUNC would get RX perms while
everything else gets RW perms).
In the only case we know of in the GNU team the symbol ends up with
zero-size anyway, so the choice of Read-Write doesn't seem too lax.
(Having zero-size is fine for the use-case we know of in glibc, since
that use case simply checks if the address of the symbol is non-zero.
Hence we have no need as yet to dereference the symbol).
The use case we know about is that of the `_nl_current_<LANG>_used`
symbols defined with `_NL_CURRENT_DEFINE` in the locale/lc-<lang>.c
files in statically linked glibc. If any case that requires non-zero
size or different permissions becomes important, then something more
will be required across the toolchain.
############### Attachment also inlined for ease of reply ###############
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 8b69c03c4a2f66a7f3d4a6dc88494c00ac72646b..0a455763faebf2d0c1e3cbe689d5b82163bfa43e 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -6364,7 +6364,7 @@ aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
/* Build capability meta data, i.e. size and permissions for a capability. */
static bfd_vma
-cap_meta (size_t size, const asection *sec)
+cap_meta (size_t size, const asection *sec, bfd_boolean *guessed)
{
if (size >= (1ULL << 56))
@@ -6386,10 +6386,20 @@ cap_meta (size_t size, const asection *sec)
else if (sec->flags & SEC_ALLOC)
flags = 2;
- /* We should always be able to derive a valid set of permissions
- from the section flags. */
+ /* We should usually be able to derive a valid set of permissions
+ from the section flags. We know that when a relocation is against an
+ SHN_ABS symbol the section has no associated flags and we must guess.
+
+ As it stands we don't know of any other instances where we do not have
+ permission flags on a section. We choose to allow instances that we do
+ not know of rather than abort on them so that if the guess is correct we
+ don't hamper anyone progressing. */
if (flags == 0)
- abort ();
+ {
+ flags = 2;
+ *guessed = TRUE;
+ }
+
return size | (flags << 56);
}
@@ -6451,15 +6461,18 @@ c64_symbol_section_adjustment (struct elf_link_hash_entry *h, bfd_vma value,
static bfd_reloc_status_type
c64_fixup_frag (bfd *input_bfd, struct bfd_link_info *info,
- Elf_Internal_Sym *sym, struct elf_link_hash_entry *h,
- asection *sym_sec, bfd_byte *frag_loc, bfd_vma value,
- bfd_signed_vma addend)
+ bfd_reloc_code_real_type bfd_r_type, Elf_Internal_Sym *sym,
+ struct elf_link_hash_entry *h, asection *sym_sec,
+ asection *reloc_sec, bfd_byte *frag_loc, bfd_vma value,
+ bfd_signed_vma addend, bfd_vma r_offset)
{
BFD_ASSERT (h || sym);
bfd_vma size = sym ? sym->st_size : h->size;
asection *perm_sec = sym_sec;
bfd_boolean bounds_ok = FALSE;
+ const int aarch64_reloc_idx = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
+ const char *reloc_name = elfNN_aarch64_howto_table[aarch64_reloc_idx].name;
const char *sym_name;
if (sym)
@@ -6534,11 +6547,22 @@ c64_fixup_frag (bfd *input_bfd, struct bfd_link_info *info,
if (perm_sec != NULL)
{
- bfd_vma frag = cap_meta (size, perm_sec);
+ bfd_boolean permissions_guessed = FALSE;
+ bfd_vma frag = cap_meta (size, perm_sec, &permissions_guessed);
if (frag == (bfd_vma) -1)
return bfd_reloc_outofrange;
+ if (permissions_guessed)
+ {
+ _bfd_error_handler (_("%pB(%pA+%#" PRIx64 "): "
+ "warning: relocation %s against symbol '%s' in "
+ "section without permission flags '%s'. "
+ "Assuming Read-Write."),
+ input_bfd, reloc_sec, r_offset, reloc_name,
+ sym_name, perm_sec->name);
+ }
+
bfd_put_64 (input_bfd, frag, frag_loc);
}
@@ -7304,9 +7328,9 @@ elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
{
bfd_reloc_status_type ret;
- ret = c64_fixup_frag (input_bfd, info, sym, h,
- sym_sec, base_got->contents + off + 8,
- orig_value, 0);
+ ret = c64_fixup_frag (input_bfd, info, bfd_r_type, sym, h,
+ sym_sec, s, base_got->contents + off + 8,
+ orig_value, 0, off);
if (ret != bfd_reloc_continue)
return ret;
@@ -7509,8 +7533,9 @@ elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
bfd_reloc_status_type ret;
- ret = c64_fixup_frag (input_bfd, info, sym, h, sym_sec,
- hit_data + 8, value, signed_addend);
+ ret = c64_fixup_frag (input_bfd, info, bfd_r_type, sym, h, sym_sec,
+ input_section, hit_data + 8, value,
+ signed_addend, rel->r_offset);
if (ret != bfd_reloc_continue)
return ret;
diff --git a/ld/testsuite/ld-aarch64/aarch64-elf.exp b/ld/testsuite/ld-aarch64/aarch64-elf.exp
index 9168a810fc57783dd122ce40b6385dddc6888042..01f65dc934de3f012b6e134ff2ccebd1f4207650 100644
--- a/ld/testsuite/ld-aarch64/aarch64-elf.exp
+++ b/ld/testsuite/ld-aarch64/aarch64-elf.exp
@@ -249,6 +249,7 @@ run_dump_test_lp64 "emit-relocs-morello-5"
run_dump_test_lp64 "emit-relocs-morello-6"
run_dump_test_lp64 "emit-relocs-morello-6b"
run_dump_test_lp64 "emit-relocs-morello-7"
+run_dump_test_lp64 "emit-relocs-morello-8"
run_dump_test_lp64 "emit-morello-reloc-markers-1"
run_dump_test_lp64 "emit-morello-reloc-markers-2"
run_dump_test_lp64 "emit-morello-reloc-markers-3"
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-8.d b/ld/testsuite/ld-aarch64/emit-relocs-morello-8.d
new file mode 100644
index 0000000000000000000000000000000000000000..e907f7de72e084263acd208374c09e91d6bd065f
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-8.d
@@ -0,0 +1,18 @@
+#source: emit-relocs-morello-8.s
+#source: emit-relocs-morello-8b.s
+#as: -march=morello+c64
+#ld: -static -pie
+#objdump: -DR -j .data.rel.ro
+#warning: .*relocation R_MORELLO_CAPINIT against symbol 'absolute_sym' in section without permission flags '\*ABS\*'\. Assuming Read-Write\.
+
+.*: file format .*
+
+
+Disassembly of section \.data\.rel\.ro:
+
+0.* <\.LC1>:
+ .*: 00001000 udf #4096
+ 101e0: R_MORELLO_RELATIVE \*ABS\*
+ .*: 00000000 udf #0
+ .*: 00000004 udf #4
+ .*: 02000000 add c0, c0, #0x0
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-8.s b/ld/testsuite/ld-aarch64/emit-relocs-morello-8.s
new file mode 100644
index 0000000000000000000000000000000000000000..4a42962a67534e13217a18048daf7279a89201b4
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-8.s
@@ -0,0 +1,16 @@
+ .arch morello+crc+c64
+ .text
+ .align 2
+ .global _start
+_start:
+ adrp c0, .LC1
+ add c0, c0, :lo12:.LC1
+ ldr c0, [c0]
+
+ .section .data.rel.ro.local,"aw"
+ .align 4
+ .type .LC1, %object
+ .size .LC1, 16
+.LC1:
+ .chericap absolute_sym
+ .ident "GCC: (unknown) 11.0.0 20200826 (experimental)"
diff --git a/ld/testsuite/ld-aarch64/emit-relocs-morello-8b.s b/ld/testsuite/ld-aarch64/emit-relocs-morello-8b.s
new file mode 100644
index 0000000000000000000000000000000000000000..fa12fb43b4e349c0949bb08b9648935032368110
--- /dev/null
+++ b/ld/testsuite/ld-aarch64/emit-relocs-morello-8b.s
@@ -0,0 +1,5 @@
+ .arch morello+crc+c64
+ .text
+ .global absolute_sym
+absolute_sym = 0x1000
+ .size absolute_sym, 4