/* Machine-dependent ELF dynamic relocation inline functions.  Alpha version.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

/* This was written in the absence of an ABI -- don't expect
   it to remain unchanged.  */

#ifndef dl_machine_h
#define dl_machine_h 1

#define ELF_MACHINE_NAME "alpha"

#include <string.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>


/* Mask identifying addresses reserved for the user program,
   where the dynamic linker should not map anything.  */
#define ELF_MACHINE_USER_ADDRESS_MASK 0x120000000UL

/* Translate a processor specific dynamic tag to the index in l_info array.  */
#define DT_ALPHA(x) (DT_ALPHA_##x - DT_LOPROC + DT_NUM)
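/* For example, map->l_info[DT_ALPHA(PLTRO)] is the slot holding the
   DT_ALPHA_PLTRO dynamic tag, stored past the DT_NUM standard slots;
   it is used below to detect the read-only ("secure") plt format.  */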

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_ALPHA;
}

/* Return the link-time address of _DYNAMIC.  The multiple-got-capable
   linker no longer allocates the first .got entry for this.  But not to
   worry, no special tricks are needed.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
#ifndef NO_AXP_MULTI_GOT_LD
  return (Elf64_Addr) &_DYNAMIC;
#else
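  /* The GP points 0x8000 bytes past the start of the GOT, so with 8-byte
     entries gp[-4096] is GOT[0], which the old single-GOT linker reserved
     for the address of _DYNAMIC (see the comment above).  */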
  register Elf64_Addr *gp __asm__ ("$29");
  return gp[-4096];
#endif
}

/* Return the run-time load address of the shared object.  */

static inline Elf64_Addr
elf_machine_load_address (void)
{
  /* This relies on the compiler using gp-relative addresses
     for static symbols.  */
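  /* This runs before ld.so has applied its own RELATIVE relocations, so
     the stored initializer still holds the link-time address of DOT,
     while &dot computed through the GP already yields the run-time
     address; their difference is the load offset.  */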
  static void *dot = &dot;
  return (void *)&dot - dot;
}

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  extern char _dl_runtime_resolve_new[] attribute_hidden;
  extern char _dl_runtime_profile_new[] attribute_hidden;
  extern char _dl_runtime_resolve_old[] attribute_hidden;
  extern char _dl_runtime_profile_old[] attribute_hidden;

  struct pltgot {
    char *resolve;
    struct link_map *link;
  };

  struct pltgot *pg;
  long secureplt;
  char *resolve;

  if (map->l_info[DT_JMPREL] == 0 || !lazy)
    return lazy;

  /* Check to see if we're using the read-only plt form.  */
  secureplt = map->l_info[DT_ALPHA(PLTRO)] != 0;

  /* If the binary uses the read-only secure plt format, PG points to
     the .got.plt section, which is the right place for ld.so to place
     its hooks.  Otherwise, PG is currently pointing at the start of
     the plt; the hooks go at offset 16.  */
  pg = (struct pltgot *) D_PTR (map, l_info[DT_PLTGOT]);
  pg += !secureplt;

  /* One of these functions will be called to perform the relocation.
     They're not declared as functions to convince the compiler to use
     gp-relative relocations for them.  */
  if (secureplt)
    resolve = _dl_runtime_resolve_new;
  else
    resolve = _dl_runtime_resolve_old;

  if (__builtin_expect (profile, 0))
    {
      if (secureplt)
        resolve = _dl_runtime_profile_new;
      else
        resolve = _dl_runtime_profile_old;

      if (GLRO(dl_profile) && _dl_name_match_p (GLRO(dl_profile), map))
        {
          /* This is the object we are looking for.  Say that we really
             want profiling and the timers are started.  */
          GL(dl_profile_map) = map;
        }
    }

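  /* These two words are what the still-unrelocated PLT entries consume:
     when a lazily bound call traps into ld.so, the stub code picks up the
     resolver entry point chosen above and this object's link_map from PG
     before transferring control to the resolver.  */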
  pg->resolve = resolve;
  pg->link = map;

  return lazy;
}

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_START asm ("\
        .section .text \n\
        .set at \n\
        .globl _start \n\
        .ent _start \n\
_start: \n\
        .frame $31,0,$31,0 \n\
        br $gp, 0f \n\
0:      ldgp $gp, 0($gp) \n\
        .prologue 0 \n\
        /* Pass pointer to argument block to _dl_start.  */ \n\
        mov $sp, $16 \n\
        bsr $26, _dl_start !samegp \n\
        .end _start \n\
        /* FALLTHRU */ \n\
        .globl _dl_start_user \n\
        .ent _dl_start_user \n\
_dl_start_user: \n\
        .frame $31,0,$31,0 \n\
        .prologue 0 \n\
        /* Save the user entry point address in s0.  */ \n\
        mov $0, $9 \n\
        /* See if we were run as a command with the executable \n\
           file name as an extra leading argument.  */ \n\
        ldah $1, _dl_skip_args($gp) !gprelhigh \n\
        ldl $1, _dl_skip_args($1) !gprellow \n\
        bne $1, $fixup_stack \n\
$fixup_stack_ret: \n\
        /* The special initializer gets called with the stack \n\
           just as the application's entry point will see it; \n\
           it can switch stacks if it moves these contents \n\
           over.  */ \n\
" RTLD_START_SPECIAL_INIT " \n\
        /* Call _dl_init(_dl_loaded, argc, argv, envp) to run \n\
           initializers.  */ \n\
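        /* $16 = the loaded object list head (first word of _rtld_local), \n\
           $17 = argc from the stack, $18 = argv, \n\
           $19 = envp = argv + 8*argc + 8.  */ \n\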
        ldah $16, _rtld_local($gp) !gprelhigh \n\
        ldq $16, _rtld_local($16) !gprellow \n\
        ldq $17, 0($sp) \n\
        lda $18, 8($sp) \n\
        s8addq $17, 8, $19 \n\
        addq $19, $18, $19 \n\
        bsr $26, _dl_init !samegp \n\
        /* Pass our finalizer function to the user in $0.  */ \n\
        ldah $0, _dl_fini($gp) !gprelhigh \n\
        lda $0, _dl_fini($0) !gprellow \n\
        /* Jump to the user's entry point.  */ \n\
        mov $9, $27 \n\
        jmp ($9) \n\
$fixup_stack: \n\
        /* Adjust the stack pointer to skip _dl_skip_args words. \n\
           This involves copying everything down, since the \n\
           stack pointer must always be 16-byte aligned.  */ \n\
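        /* Register usage below: $1 = _dl_skip_args, $2 = adjusted argc, \n\
           $3 = copy source, $4 = copy destination; $5-$7 are scratch, \n\
           also used to slide __GI__dl_argv down by the same amount.  */ \n\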
        ldah $7, __GI__dl_argv($gp) !gprelhigh \n\
        ldq $2, 0($sp) \n\
        ldq $5, __GI__dl_argv($7) !gprellow \n\
        subq $31, $1, $6 \n\
        subq $2, $1, $2 \n\
        s8addq $6, $5, $5 \n\
        mov $sp, $4 \n\
        s8addq $1, $sp, $3 \n\
        stq $2, 0($sp) \n\
        stq $5, __GI__dl_argv($7) !gprellow \n\
        /* Copy down argv.  */ \n\
0:      ldq $5, 8($3) \n\
        addq $4, 8, $4 \n\
        addq $3, 8, $3 \n\
        stq $5, 0($4) \n\
        bne $5, 0b \n\
        /* Copy down envp.  */ \n\
1:      ldq $5, 8($3) \n\
        addq $4, 8, $4 \n\
        addq $3, 8, $3 \n\
        stq $5, 0($4) \n\
        bne $5, 1b \n\
        /* Copy down auxiliary table.  */ \n\
2:      ldq $5, 8($3) \n\
        ldq $6, 16($3) \n\
        addq $4, 16, $4 \n\
        addq $3, 16, $3 \n\
        stq $5, -8($4) \n\
        stq $6, 0($4) \n\
        bne $5, 2b \n\
        br $fixup_stack_ret \n\
        .end _dl_start_user \n\
        .set noat \n\
.previous");

#ifndef RTLD_START_SPECIAL_INIT
#define RTLD_START_SPECIAL_INIT /* nothing */
#endif

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry
   or a TLS variable, so undefined references should not be allowed
   to define the value.

   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve
   to one of the main executable's symbols, as for a COPY reloc.
   This is unused on Alpha.  */

# define elf_machine_type_class(type) \
  (((type) == R_ALPHA_JMP_SLOT \
    || (type) == R_ALPHA_DTPMOD64 \
    || (type) == R_ALPHA_DTPREL64 \
    || (type) == R_ALPHA_TPREL64) * ELF_RTYPE_CLASS_PLT)

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_ALPHA_JMP_SLOT

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
}

/* Fix up the instructions of a PLT entry to invoke the function
   rather than the dynamic linker.  */
static inline Elf64_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *got_addr, Elf64_Addr value)
{
  const Elf64_Rela *rela_plt;
  Elf64_Word *plte;
  long int edisp;

  /* Store the value we are going to load.  */
  *got_addr = value;

  /* If this binary uses the read-only secure plt format, we're done.  */
  if (map->l_info[DT_ALPHA(PLTRO)])
    return value;

  /* Otherwise we have to modify the plt entry in place to do the branch.  */

  /* Recover the PLT entry address by calculating reloc's index into the
     .rela.plt, and finding that entry in the .plt.  */
  rela_plt = (const Elf64_Rela *) D_PTR (map, l_info[DT_JMPREL]);
  plte = (Elf64_Word *) (D_PTR (map, l_info[DT_PLTGOT]) + 32);
  plte += 3 * (reloc - rela_plt);
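  /* Each old-format PLT entry is three 4-byte instructions, and the plt
     header (which holds the hooks written by elf_machine_runtime_setup)
     occupies the first 32 bytes, hence the +32 above and the stride of
     three words per .rela.plt entry.  */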

  /* Find the displacement from the plt entry to the function.  */
  edisp = (long int) (value - (Elf64_Addr)&plte[3]) / 4;
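  /* The displacement is counted in instructions from the end of the entry,
     matching the PC-relative br emitted below; a 21-bit signed branch
     displacement reaches +/- 2^20 instructions, hence the range test.  */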

  if (edisp >= -0x100000 && edisp < 0x100000)
    {
      /* If we are in range, use br to perfect branch prediction and
         elide the dependency on the address load.  This case happens,
         e.g., when a shared library call is resolved to the same library.  */

      int hi, lo;
      hi = value - (Elf64_Addr)&plte[0];
      lo = (short int) hi;
      hi = (hi - lo) >> 16;

      /* Emit "lda $27,lo($27)" */
      plte[1] = 0x237b0000 | (lo & 0xffff);

      /* Emit "br $31,function" */
      plte[2] = 0xc3e00000 | (edisp & 0x1fffff);

      /* Think about thread-safety -- the previous instructions must be
         committed to memory before the first is overwritten.  */
      __asm__ __volatile__("wmb" : : : "memory");

      /* Emit "ldah $27,hi($27)" */
      plte[0] = 0x277b0000 | (hi & 0xffff);
    }
  else
    {
      /* Don't bother with the hint since we already know the hint is
         wrong.  Eliding it prevents the wrong page from getting pulled
         into the cache.  */

      int hi, lo;
      hi = (Elf64_Addr)got_addr - (Elf64_Addr)&plte[0];
      lo = (short)hi;
      hi = (hi - lo) >> 16;

      /* Emit "ldq $27,lo($27)" */
      plte[1] = 0xa77b0000 | (lo & 0xffff);

      /* Emit "jmp $31,($27)" */
      plte[2] = 0x6bfb0000;

      /* Think about thread-safety -- the previous instructions must be
         committed to memory before the first is overwritten.  */
      __asm__ __volatile__("wmb" : : : "memory");

      /* Emit "ldah $27,hi($27)" */
      plte[0] = 0x277b0000 | (hi & 0xffff);
    }

  /* At this point, if we've been doing runtime resolution, Icache is dirty.
     This will be taken care of in _dl_runtime_resolve.  If instead we are
     doing this as part of non-lazy startup relocation, that bit of code
     hasn't made it into Icache yet, so there's nothing to clean up.  */

  return value;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER alpha_gnu_pltenter
#define ARCH_LA_PLTEXIT alpha_gnu_pltexit

#endif /* !dl_machine_h */

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */
static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  unsigned long int const r_type = ELF64_R_TYPE (reloc->r_info);

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC && !defined SHARED
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  /* We cannot use a switch here because we cannot locate the switch
     jump table until we've self-relocated.  */

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_ALPHA_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* Already done in dynamic linker.  */
      if (map != &GL(dl_rtld_map))
# endif
        {
          /* XXX Make some timings.  Maybe it's preferable to test for
             unaligned access and only do it the complex way if necessary.  */
          Elf64_Addr reloc_addr_val;

          /* Load value without causing unaligned trap.  */
          memcpy (&reloc_addr_val, reloc_addr_arg, 8);
          reloc_addr_val += map->l_addr;

          /* Store value without causing unaligned trap.  */
          memcpy (reloc_addr_arg, &reloc_addr_val, 8);
        }
    }
  else
#endif
  if (__builtin_expect (r_type == R_ALPHA_NONE, 0))
    return;
  else
    {
      struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
                                              r_type);
      Elf64_Addr sym_value;
      Elf64_Addr sym_raw_value;

      sym_raw_value = sym_value = reloc->r_addend;
      if (sym_map)
        {
          sym_raw_value += sym->st_value;
          sym_value += SYMBOL_ADDRESS (sym_map, sym, true);
        }
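      /* SYM_RAW_VALUE omits the defining object's load offset; it is what
         the TLS relocations below want, while SYM_VALUE is the fully
         relocated address used by the address relocations.  */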

      if (r_type == R_ALPHA_GLOB_DAT)
        *reloc_addr = sym_value;
#ifdef RESOLVE_CONFLICT_FIND_MAP
      /* In .gnu.conflict section, R_ALPHA_JMP_SLOT relocations have
         R_ALPHA_JMP_SLOT in lower 8 bits and the remaining 24 bits
         are .rela.plt index.  */
      else if ((r_type & 0xff) == R_ALPHA_JMP_SLOT)
        {
          /* elf_machine_fixup_plt needs the map reloc_addr points into,
             while in _dl_resolve_conflicts map is _dl_loaded.  */
          RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
          reloc = ((const Elf64_Rela *) D_PTR (map, l_info[DT_JMPREL]))
                  + (r_type >> 8);
          elf_machine_fixup_plt (map, 0, 0, 0, reloc, reloc_addr, sym_value);
        }
#else
      else if (r_type == R_ALPHA_JMP_SLOT)
        elf_machine_fixup_plt (map, 0, 0, 0, reloc, reloc_addr, sym_value);
#endif
#ifndef RTLD_BOOTSTRAP
      else if (r_type == R_ALPHA_REFQUAD)
        {
          /* Store value without causing unaligned trap.  */
          memcpy (reloc_addr_arg, &sym_value, 8);
        }
#endif
      else if (r_type == R_ALPHA_DTPMOD64)
        {
# ifdef RTLD_BOOTSTRAP
          /* During startup the dynamic linker is always index 1.  */
          *reloc_addr = 1;
# else
          /* Get the information from the link map returned by the
             resolve function.  */
          if (sym_map != NULL)
            *reloc_addr = sym_map->l_tls_modid;
# endif
        }
      else if (r_type == R_ALPHA_DTPREL64)
        {
# ifndef RTLD_BOOTSTRAP
          /* During relocation all TLS symbols are defined and used.
             Therefore the offset is already correct.  */
          *reloc_addr = sym_raw_value;
# endif
        }
      else if (r_type == R_ALPHA_TPREL64)
        {
# ifdef RTLD_BOOTSTRAP
          *reloc_addr = sym_raw_value + map->l_tls_offset;
# else
          if (sym_map)
            {
              CHECK_STATIC_TLS (map, sym_map);
              *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
            }
# endif
        }
      else
        _dl_reloc_bad_type (map, r_type, 0);
    }
}

/* Let do-rel.h know that on Alpha if l_addr is 0, all RELATIVE relocs
   can be skipped.  */
#define ELF_MACHINE_REL_RELATIVE 1

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  /* XXX Make some timings.  Maybe it's preferable to test for
     unaligned access and only do it the complex way if necessary.  */
  Elf64_Addr reloc_addr_val;

  /* Load value without causing unaligned trap.  */
  memcpy (&reloc_addr_val, reloc_addr_arg, 8);
  reloc_addr_val += l_addr;

  /* Store value without causing unaligned trap.  */
  memcpy (reloc_addr_arg, &reloc_addr_val, 8);
}

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  Elf64_Addr * const reloc_addr = (void *)(l_addr + reloc->r_offset);
  unsigned long int const r_type = ELF64_R_TYPE (reloc->r_info);

  if (r_type == R_ALPHA_JMP_SLOT)
    {
      /* Perform a RELATIVE reloc on the .got entry that transfers
         to the .plt.  */
      *reloc_addr += l_addr;
    }
  else if (r_type == R_ALPHA_NONE)
    return;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */
