From 5de88b9073cb67bf6c194ec618c2eb5905f3e99e Mon Sep 17 00:00:00 2001
From: rockyautomation <rockyautomation@rockylinux.org>
Date: Tue, 16 May 2023 08:50:24 +0000
Subject: [PATCH] import glibc-2.28-225.el8

---
 SOURCES/glibc-rh1159809-1.patch  | 2024 ++++++++++++++++++++++++++++++
 SOURCES/glibc-rh1159809-10.patch |   79 ++
 SOURCES/glibc-rh1159809-11.patch |   90 ++
 SOURCES/glibc-rh1159809-12.patch |   35 +
 SOURCES/glibc-rh1159809-2.patch  |  189 +++
 SOURCES/glibc-rh1159809-3.patch  |  589 +++++++++
 SOURCES/glibc-rh1159809-4.patch  |   25 +
 SOURCES/glibc-rh1159809-5.patch  |   45 +
 SOURCES/glibc-rh1159809-6.patch  |   49 +
 SOURCES/glibc-rh1159809-7.patch  |  357 ++++++
 SOURCES/glibc-rh1159809-8.patch  |   36 +
 SOURCES/glibc-rh1159809-9.patch  |   37 +
 SOURCES/glibc-rh1871383-1.patch  |  245 ++++
 SOURCES/glibc-rh1871383-2.patch  |   87 ++
 SOURCES/glibc-rh1871383-3.patch  |  100 ++
 SOURCES/glibc-rh1871383-4.patch  |  215 ++++
 SOURCES/glibc-rh1871383-5.patch  |   35 +
 SOURCES/glibc-rh1871383-6.patch  |   35 +
 SOURCES/glibc-rh1871383-7.patch  |  133 ++
 SOURCES/glibc-rh2109510-1.patch  |   27 +
 SOURCES/glibc-rh2109510-10.patch | 1449 +++++++++++++++++++++
 SOURCES/glibc-rh2109510-11.patch |  409 ++++++
 SOURCES/glibc-rh2109510-12.patch |   26 +
 SOURCES/glibc-rh2109510-13.patch |   30 +
 SOURCES/glibc-rh2109510-14.patch |   50 +
 SOURCES/glibc-rh2109510-15.patch |   26 +
 SOURCES/glibc-rh2109510-16.patch |   22 +
 SOURCES/glibc-rh2109510-17.patch |   78 ++
 SOURCES/glibc-rh2109510-18.patch |  439 +++++++
 SOURCES/glibc-rh2109510-19.patch |  598 +++++++++
 SOURCES/glibc-rh2109510-2.patch  |  208 +++
 SOURCES/glibc-rh2109510-20.patch |   36 +
 SOURCES/glibc-rh2109510-21.patch | 1295 +++++++++++++++++++
 SOURCES/glibc-rh2109510-22.patch |   34 +
 SOURCES/glibc-rh2109510-23.patch |  108 ++
 SOURCES/glibc-rh2109510-3.patch  |   32 +
 SOURCES/glibc-rh2109510-4.patch  |  157 +++
 SOURCES/glibc-rh2109510-5.patch  |  483 +++++++
 SOURCES/glibc-rh2109510-6.patch  |   98 ++
 SOURCES/glibc-rh2109510-7.patch  |  178 +++
 SOURCES/glibc-rh2109510-8.patch  |   23 +
 SOURCES/glibc-rh2109510-9.patch  |   45 +
 SOURCES/glibc-rh2116938.patch    |  449 +++++++
 SOURCES/glibc-rh2118667.patch    |   96 ++
 SOURCES/glibc-rh2121746-1.patch  |  202 +++
 SOURCES/glibc-rh2121746-2.patch  |   98 ++
 SOURCES/glibc-rh2122498.patch    |   39 +
 SOURCES/glibc-rh2122501-1.patch  |  472 +++++++
 SOURCES/glibc-rh2122501-2.patch  |  160 +++
 SOURCES/glibc-rh2122501-3.patch  |  356 ++++++
 SOURCES/glibc-rh2122501-4.patch  |   86 ++
 SOURCES/glibc-rh2122501-5.patch  |   81 ++
 SOURCES/glibc-rh2125222.patch    |   54 +
 SOURCES/glibc-rh2139875-1.patch  |   32 +
 SOURCES/glibc-rh2139875-2.patch  |   31 +
 SOURCES/glibc-rh2139875-3.patch  |   61 +
 SOURCES/glibc-rh2141989.patch    |  101 ++
 SOURCES/glibc-rh2142937-1.patch  |  354 ++++++
 SOURCES/glibc-rh2142937-2.patch  |   24 +
 SOURCES/glibc-rh2142937-3.patch  |   37 +
 SOURCES/glibc-rh2144568.patch    |   45 +
 SOURCES/glibc-rh2154914-1.patch  |  297 +++++
 SOURCES/glibc-rh2154914-2.patch  |   81 ++
 SOURCES/wrap-find-debuginfo.sh   |   35 +-
 SPECS/glibc.spec                 |  111 +-
 65 files changed, 13556 insertions(+), 2 deletions(-)
 create mode 100644 SOURCES/glibc-rh1159809-1.patch
 create mode 100644 SOURCES/glibc-rh1159809-10.patch
 create mode 100644 SOURCES/glibc-rh1159809-11.patch
 create mode 100644 SOURCES/glibc-rh1159809-12.patch
 create mode 100644 SOURCES/glibc-rh1159809-2.patch
 create mode 100644 SOURCES/glibc-rh1159809-3.patch
 create mode 100644 SOURCES/glibc-rh1159809-4.patch
 create mode 100644 SOURCES/glibc-rh1159809-5.patch
 create mode 100644 SOURCES/glibc-rh1159809-6.patch
 create mode 100644 SOURCES/glibc-rh1159809-7.patch
 create mode 100644 SOURCES/glibc-rh1159809-8.patch
 create mode 100644 SOURCES/glibc-rh1159809-9.patch
 create mode 100644 SOURCES/glibc-rh1871383-1.patch
 create mode 100644 SOURCES/glibc-rh1871383-2.patch
 create mode 100644 SOURCES/glibc-rh1871383-3.patch
 create mode 100644 SOURCES/glibc-rh1871383-4.patch
 create mode 100644 SOURCES/glibc-rh1871383-5.patch
 create mode 100644 SOURCES/glibc-rh1871383-6.patch
 create mode 100644 SOURCES/glibc-rh1871383-7.patch
 create mode 100644 SOURCES/glibc-rh2109510-1.patch
 create mode 100644 SOURCES/glibc-rh2109510-10.patch
 create mode 100644 SOURCES/glibc-rh2109510-11.patch
 create mode 100644 SOURCES/glibc-rh2109510-12.patch
 create mode 100644 SOURCES/glibc-rh2109510-13.patch
 create mode 100644 SOURCES/glibc-rh2109510-14.patch
 create mode 100644 SOURCES/glibc-rh2109510-15.patch
 create mode 100644 SOURCES/glibc-rh2109510-16.patch
 create mode 100644 SOURCES/glibc-rh2109510-17.patch
 create mode 100644 SOURCES/glibc-rh2109510-18.patch
 create mode 100644 SOURCES/glibc-rh2109510-19.patch
 create mode 100644 SOURCES/glibc-rh2109510-2.patch
 create mode 100644 SOURCES/glibc-rh2109510-20.patch
 create mode 100644 SOURCES/glibc-rh2109510-21.patch
 create mode 100644 SOURCES/glibc-rh2109510-22.patch
 create mode 100644 SOURCES/glibc-rh2109510-23.patch
 create mode 100644 SOURCES/glibc-rh2109510-3.patch
 create mode 100644 SOURCES/glibc-rh2109510-4.patch
 create mode 100644 SOURCES/glibc-rh2109510-5.patch
 create mode 100644 SOURCES/glibc-rh2109510-6.patch
 create mode 100644 SOURCES/glibc-rh2109510-7.patch
 create mode 100644 SOURCES/glibc-rh2109510-8.patch
 create mode 100644 SOURCES/glibc-rh2109510-9.patch
 create mode 100644 SOURCES/glibc-rh2116938.patch
 create mode 100644 SOURCES/glibc-rh2118667.patch
 create mode 100644 SOURCES/glibc-rh2121746-1.patch
 create mode 100644 SOURCES/glibc-rh2121746-2.patch
 create mode 100644 SOURCES/glibc-rh2122498.patch
 create mode 100644 SOURCES/glibc-rh2122501-1.patch
 create mode 100644 SOURCES/glibc-rh2122501-2.patch
 create mode 100644 SOURCES/glibc-rh2122501-3.patch
 create mode 100644 SOURCES/glibc-rh2122501-4.patch
 create mode 100644 SOURCES/glibc-rh2122501-5.patch
 create mode 100644 SOURCES/glibc-rh2125222.patch
 create mode 100644 SOURCES/glibc-rh2139875-1.patch
 create mode 100644 SOURCES/glibc-rh2139875-2.patch
 create mode 100644 SOURCES/glibc-rh2139875-3.patch
 create mode 100644 SOURCES/glibc-rh2141989.patch
 create mode 100644 SOURCES/glibc-rh2142937-1.patch
 create mode 100644 SOURCES/glibc-rh2142937-2.patch
 create mode 100644 SOURCES/glibc-rh2142937-3.patch
 create mode 100644 SOURCES/glibc-rh2144568.patch
 create mode 100644 SOURCES/glibc-rh2154914-1.patch
 create mode 100644 SOURCES/glibc-rh2154914-2.patch

diff --git a/SOURCES/glibc-rh1159809-1.patch b/SOURCES/glibc-rh1159809-1.patch
new file mode 100644
index 0000000..fb15043
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-1.patch
@@ -0,0 +1,2024 @@
+commit e6fd79f3795d46dfb583e124be49fc063bc3d58b
+Author: Chung-Lin Tang <cltang@codesourcery.com>
+Date:   Thu Oct 21 21:41:21 2021 +0800
+
+    elf: Testing infrastructure for ld.so DSO sorting (BZ #17645)
+    
+    This is the first of a 2-part patch set that fixes slow DSO sorting behavior in
+    the dynamic loader, as reported in BZ #17645. In order to facilitate such a
+    large modification to the dynamic loader, this first patch implements a testing
+    framework for validating shared object sorting behavior, to enable comparison
+    between old/new sorting algorithms, and any later enhancements.
+    
+    This testing infrastructure consists of a Python script
+    'scripts/dso-ordering-test.py', which takes in a description language, consisting
+    of strings that describe a set of link dependency relations between DSOs, and
+    generates testcase programs and Makefile fragments to automatically test the
+    described situation, for example:
+    
+      a->b->c->d          # four objects linked one after another
+    
+      a->[bc]->d;b->c     # a depends on b and c, which both depend on d,
+                          # b depends on c (b,c linked to object a in fixed order)
+    
+      a->b->c;{+a;%a;-a}  # a, b, c serially dependent, main program uses
+                          # dlopen/dlsym/dlclose on object a
+    
+      a->b->c;{}!->[abc]  # a, b, c serially dependent; multiple tests generated
+                          # to test all permutations of a, b, c ordering linked
+                          # to main program
+    
+     (Above is just a short description of what the script can do, more
+      documentation is in the script comments.)
+    
+    Two files containing several new tests, elf/dso-sort-tests-[12].def, are added,
+    including test scenarios for BZ #15311 and Red Hat issue #1162810 [1].
+    
+    Due to the nature of dynamic loader tests, where the sorting behavior and test
+    output occur before/after main(), generating testcases that use
+    support/test-driver.c does not suffice to control a meaningful timeout for ld.so.
+    Therefore a new utility program, 'support/test-run-command', based on
+    test-driver.c/support_test_main.c, has been added. This does the same testcase
+    control, but for a program specified through a command-line rather than at the
+    source code level. This utility is used to run the dynamic loader testcases
+    generated by dso-ordering-test.py.
+    
+    [1] https://bugzilla.redhat.com/show_bug.cgi?id=1162810
+    
+    Signed-off-by: Chung-Lin Tang  <cltang@codesourcery.com>
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/Makefile b/elf/Makefile
+index 1fdf40cbd49e233e..e92f62f279566684 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -986,6 +986,21 @@ tests-special += \
+   # tests-special
+ endif
+ 
++# DSO sorting tests:
++# The dso-ordering-test.py script generates testcase source files in $(objpfx),
++# creating a $(objpfx)<testcase-name>-dir for each testcase, and creates a
++# Makefile fragment to be included.
++define include_dsosort_tests
++$(objpfx)$(1).generated-makefile: $(1)
++	$(PYTHON) $(..)scripts/dso-ordering-test.py \
++	--description-file $$< --objpfx $(objpfx) --output-makefile $$@
++include $(objpfx)$(1).generated-makefile
++endef
++
++# Generate from each testcase description file
++$(eval $(call include_dsosort_tests,dso-sort-tests-1.def))
++$(eval $(call include_dsosort_tests,dso-sort-tests-2.def))
++
+ check-abi: $(objpfx)check-abi-ld.out
+ tests-special += $(objpfx)check-abi-ld.out
+ update-abi: update-abi-ld
+diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
+new file mode 100644
+index 0000000000000000..873ddf55d91155c6
+--- /dev/null
++++ b/elf/dso-sort-tests-1.def
+@@ -0,0 +1,66 @@
++# DSO sorting test descriptions.
++# This file is to be processed by ../scripts/dso-ordering-test.py; see usage
++# in elf/Makefile for how it is executed.
++
++# We test both dynamic loader sorting algorithms
++tunable_option: glibc.rtld.dynamic_sort=1
++tunable_option: glibc.rtld.dynamic_sort=2
++
++# Sequence of single dependencies with no cycles.
++tst-dso-ordering1: a->b->c
++output: c>b>a>{}<a<b<c
++
++# Sequence including 2 dependent DSOs not at the end of the graph.
++tst-dso-ordering2: a->b->[cd]->e
++output: e>d>c>b>a>{}<a<b<c<d<e
++
++# Complex order with 3 "layers" of full dependencies
++tst-dso-ordering3: a->[bc]->[def]->[gh]->i
++output: i>h>g>f>e>d>c>b>a>{}<a<b<c<d<e<f<g<h<i
++
++# Sequence including 2 dependent DSOs at the end of the graph.
++# Additionally the same dependencies appear in two paths.
++tst-dso-ordering4: a->b->[de];a->c->d->e
++output: e>d>c>b>a>{}<a<b<c<d<e
++
++# Test that b->c cross link is respected correctly
++tst-dso-ordering5: a!->[bc]->d;b->c
++output: d>c>b>a>{}<a<b<c<d
++
++# First DSO fully dependent on 4 DSOs, with another DSO at the end of chain.
++tst-dso-ordering6: a->[bcde]->f
++output: f>e>d>c>b>a>{}<a<b<c<d<e<f
++
++# Sequence including 2 dependent and 3 dependent DSOs, and one of the
++# dependent DSOs is dependent on an earlier DSO.
++tst-dso-ordering7: a->[bc];b->[cde];e->f
++output: f>e>d>c>b>a>{}<a<b<c<d<e<f
++
++# Sequence where the DSO c is underlinked and calls a function in DSO a, which
++# is technically a cycle.  The main executable depends on the first two DSOs.
++# Note: This test has unspecified behavior.
++tst-dso-ordering8: a->b->c=>a;{}->[ba]
++output: c>b>a>{}<a<b<c
++
++# Generate all permutations of DT_NEEDED order between the main binary and
++# all 5 DSOs; all link orders should produce the exact same init/fini ordering
++tst-dso-ordering9: a->b->c->d->e;{}!->[abcde]
++output: e>d>c>b>a>{}<a<b<c<d<e
++
++# Test whether init/fini ordering behavior is correct, despite the main program
++# having an soname that may cause confusion
++tst-dso-ordering10: {}->a->b->c;soname({})=c
++output: b>a>{}<a<b
++
++# Complex example from Bugzilla #15311, under-linked and with circular
++# relocation(dynamic) dependencies. While this is technically unspecified, the
++# presumed reasonable practical behavior is for the destructor order to respect
++# the static DT_NEEDED links (here this means the a->b->c->d order).
++# The older dynamic_sort=1 algorithm does not achieve this, while the DFS-based
++# dynamic_sort=2 algorithm does, although it is still arguable whether going
++# beyond spec to do this is the right thing to do.
++# The below expected outputs are what the two algorithms currently produce
++# respectively, for regression testing purposes.
++tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
++xfail_output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
++output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
+diff --git a/elf/dso-sort-tests-2.def b/elf/dso-sort-tests-2.def
+new file mode 100644
+index 0000000000000000..b79e79ecb7dc3dbf
+--- /dev/null
++++ b/elf/dso-sort-tests-2.def
+@@ -0,0 +1,614 @@
++# Large DSO sorting testcase adapted from Red Hat Bugzilla 1162810
++#
++# Note that below we specify different expected outputs between dynamic_sort=1
++# and dynamic_sort=2 algorithms, due to circular dependencies in the testcase
++# causing different sorting behavior.  These expected outputs are what the two
++# algorithms currently produce, and are used for regression comparison tests.
++# They are not "definitively" correct outputs, for circular dependencies
++# inherently have unspecified behavior.
++
++xtest(tst-redhat-1162810):
++{}->A101
++{}->*
++A101->(B101 B163 B122 B181)
++A102->(B102 B140 B199 B158)
++A103->(B103 B117 B176 B135)
++A104->(B104 B194 B153 B112)
++A105->(B105 B171 B130 B189)
++A106->(B106 B148 B107 B166)
++A107->(B107 B125 B184 B143)
++A108->(B108 B102 B161 B120)
++A109->(B109 B179 B138 B197)
++A110->(B110 B156 B115 B174)
++A111->(B111 B133 B192 B151)
++A112->(B112 B110 B169 B128)
++A113->(B113 B187 B146 B105)
++A114->(B114 B164 B123 B182)
++A115->(B115 B141 B200 B159)
++A116->(B116 B118 B177 B136)
++A117->(B117 B195 B154 B113)
++A118->(B118 B172 B131 B190)
++A119->(B119 B149 B108 B167)
++A120->(B120 B126 B185 B144)
++A121->(B121 B103 B162)
++A122->(B122 B180 B139 B198)
++A123->(B123 B157 B116 B175)
++A124->(B124 B134 B193 B152)
++A125->(B125 B111 B170 B129)
++A126->(B126 B188 B147 B106)
++A127->(B127 B165 B124 B183)
++A128->(B128 B142 B101 B160)
++A129->(B129 B119 B178 B137)
++A130->(B130 B196 B155 B114)
++A131->(B131 B173 B132 B191)
++A132->(B132 B150 B109 B168)
++A133->(B133 B127 B186 B145)
++A134->(B134 B104 B163 B122)
++A135->(B135 B181 B140 B199)
++A136->(B136 B158 B117 B176)
++A137->(B137 B135 B194 B153)
++A138->(B138 B112 B171 B130)
++A139->(B139 B189 B148 B107)
++A140->(B140 B166 B125 B184)
++A141->(B141 B143 B102 B161)
++A142->(B142 B120 B179 B138)
++A143->(B143 B197 B156 B115)
++A144->(B144 B174 B133 B192)
++A145->(B145 B151 B110 B169)
++A146->(B146 B128 B187)
++A147->(B147 B105 B164 B123)
++A148->(B148 B182 B141 B200)
++A149->(B149 B159 B118 B177)
++A150->(B150 B136 B195 B154)
++A151->(B151 B113 B172 B131)
++A152->(B152 B190 B149 B108)
++A153->(B153 B167 B126 B185)
++A154->(B154 B144 B103 B162)
++A155->(B155 B121 B180 B139)
++A156->(B156 B198 B157 B116)
++A157->(B157 B175 B134 B193)
++A158->(B158 B152 B111 B170)
++A159->(B159 B129 B188 B147)
++A160->(B160 B106 B165 B124)
++A161->(B161 B183 B142 B101)
++A162->(B162 B160 B119 B178)
++A163->(B163 B137 B196 B155)
++A164->(B164 B114 B173 B132)
++A165->(B165 B191 B150 B109)
++A166->(B166 B168 B127 B186)
++A167->(B167 B145 B104 B163)
++A168->(B168 B122 B181 B140)
++A169->(B169 B199 B158 B117)
++A170->(B170 B176 B135 B194)
++A171->(B171 B153 B112)
++A172->(B172 B130 B189 B148)
++A173->(B173 B107 B166 B125)
++A174->(B174 B184 B143 B102)
++A175->(B175 B161 B120 B179)
++A176->(B176 B138 B197 B156)
++A177->(B177 B115 B174 B133)
++A178->(B178 B192 B151 B110)
++A179->(B179 B169 B128 B187)
++A180->(B180 B146 B105 B164)
++A181->(B181 B123 B182 B141)
++A182->(B182 B200 B159 B118)
++A183->(B183 B177 B136 B195)
++A184->(B184 B154 B113 B172)
++A185->(B185 B131 B190 B149)
++A186->(B186 B108 B167 B126)
++A187->(B187 B185 B144 B103)
++A188->(B188 B162 B121 B180)
++A189->(B189 B139 B198 B157)
++A190->(B190 B116 B175 B134)
++A191->(B191 B193 B152 B111)
++A192->(B192 B170 B129 B188)
++A193->(B193 B147 B106 B165)
++A194->(B194 B124 B183 B142)
++A195->(B195 B101 B160 B119)
++A196->(B196 B178 B137)
++A197->(B197 B155 B114 B173)
++A198->(B198 B132 B191 B150)
++A199->(B199 B109 B168 B127)
++A200->(B200 B186 B145 B104)
++B101->(C101 C164 C123 C182)
++B102->(C102 C141 C200 C159)
++B103->(C103 C118 C177 C136)
++B104->(C104 C195 C154 C113)
++B105->(C105 C172 C131 C190)
++B106->(C106 C149 C108 C167)
++B107->(C107 C126 C185 C144)
++B108->(C108 C103 C162 C121)
++B109->(C109 C180 C139 C198)
++B110->(C110 C157 C116 C175)
++B111->(C111 C134 C193 C152)
++B112->(C112 C111 C170 C129)
++B113->(C113 C188 C147 C106)
++B114->(C114 C165 C124 C183)
++B115->(C115 C142 C101 C160)
++B116->(C116 C119 C178 C137)
++B117->(C117 C196 C155 C114)
++B118->(C118 C173 C132 C191)
++B119->(C119 C150 C109 C168)
++B120->(C120 C127 C186 C145)
++B121->(C121 C104 C163 C122)
++B122->(C122 C181 C140 C199)
++B123->(C123 C158 C117 C176)
++B124->(C124 C135 C194 C153)
++B125->(C125 C112 C171 C130)
++B126->(C126 C189 C148 C107)
++B127->(C127 C166 C125 C184)
++B128->(C128 C143 C102 C161)
++B129->(C129 C120 C179 C138)
++B130->(C130 C197 C156 C115)
++B131->(C131 C174 C133 C192)
++B132->(C132 C151 C110 C169)
++B133->(C133 C128 C187 C146)
++B134->(C134 C105 C164 C123)
++B135->(C135 C182 C141 C200)
++B136->(C136 C159 C118 C177)
++B137->(C137 C136 C195 C154)
++B138->(C138 C113 C172 C131)
++B139->(C139 C190 C149 C108)
++B140->(C140 C167 C126 C185)
++B141->(C141 C144 C103 C162)
++B142->(C142 C121 C180 C139)
++B143->(C143 C198 C157 C116)
++B144->(C144 C175 C134 C193)
++B145->(C145 C152 C111 C170)
++B146->(C146 C129 C188 C147)
++B147->(C147 C106 C165 C124)
++B148->(C148 C183 C142 C101)
++B149->(C149 C160 C119 C178)
++B150->(C150 C137 C196 C155)
++B151->(C151 C114 C173 C132)
++B152->(C152 C191 C150 C109)
++B153->(C153 C168 C127 C186)
++B154->(C154 C145 C104 C163)
++B155->(C155 C122 C181 C140)
++B156->(C156 C199 C158 C117)
++B157->(C157 C176 C135 C194)
++B158->(C158 C153 C112 C171)
++B159->(C159 C130 C189 C148)
++B160->(C160 C107 C166 C125)
++B161->(C161 C184 C143 C102)
++B162->(C162 C161 C120 C179)
++B163->(C163 C138 C197 C156)
++B164->(C164 C115 C174 C133)
++B165->(C165 C192 C151 C110)
++B166->(C166 C169 C128 C187)
++B167->(C167 C146 C105 C164)
++B168->(C168 C123 C182 C141)
++B169->(C169 C200 C159 C118)
++B170->(C170 C177 C136 C195)
++B171->(C171 C154 C113 C172)
++B172->(C172 C131 C190 C149)
++B173->(C173 C108 C167 C126)
++B174->(C174 C185 C144 C103)
++B175->(C175 C162 C121 C180)
++B176->(C176 C139 C198 C157)
++B177->(C177 C116 C175 C134)
++B178->(C178 C193 C152 C111)
++B179->(C179 C170 C129 C188)
++B180->(C180 C147 C106 C165)
++B181->(C181 C124 C183 C142)
++B182->(C182 C101 C160 C119)
++B183->(C183 C178 C137 C196)
++B184->(C184 C155 C114 C173)
++B185->(C185 C132 C191 C150)
++B186->(C186 C109 C168 C127)
++B187->(C187 C186 C145 C104)
++B188->(C188 C163 C122 C181)
++B189->(C189 C140 C199 C158)
++B190->(C190 C117 C176 C135)
++B191->(C191 C194 C153 C112)
++B192->(C192 C171 C130 C189)
++B193->(C193 C148 C107 C166)
++B194->(C194 C125 C184 C143)
++B195->(C195 C102 C161 C120)
++B196->(C196 C179 C138 C197)
++B197->(C197 C156 C115 C174)
++B198->(C198 C133 C192 C151)
++B199->(C199 C110 C169 C128)
++B200->(C200 C187 C146 C105)
++C101->(A165 A124)
++C102->(A183 A142)
++C103->(A101 A160)
++C104->(A119 A178)
++C105->(A137 A196)
++C106->(A155 A114)
++C107->(A173 A132)
++C108->(A191 A150)
++C109->(A109 A168)
++C110->(A127 A186)
++C111->(A145 A104)
++C112->(A163 A122)
++C113->(A181 A140)
++C114->(A199 A158)
++C115->(A117 A176)
++C116->(A135 A194)
++C117->(A153 A112)
++C118->(A171 A130)
++C119->(A189 A148)
++C120->(A107 A166)
++C121->(A125 A184)
++C122->(A143 A102)
++C123->(A161 A120)
++C124->(A179 A138)
++C125->(A197 A156)
++C126->(A115 A174)
++C127->(A133 A192)
++C128->(A151 A110)
++C129->(A169 A128)
++C130->(A187 A146)
++C131->(A105 A164)
++C132->(A123 A182)
++C133->(A141 A200)
++C134->(A159 A118)
++C135->(A177 A136)
++C136->(A195 A154)
++C137->(A113 A172)
++C138->(A131 A190)
++C139->(A149 A108)
++C140->(A167 A126)
++C141->(A185 A144)
++C142->(A103 A162)
++C143->(A121 A180)
++C144->(A139 A198)
++C145->(A157 A116)
++C146->(A175 A134)
++C147->(A193 A152)
++C148->(A111 A170)
++C149->(A129 A188)
++C150->(A147 A106)
++C151->(A165 A124)
++C152->(A183 A142)
++C153->(A101 A160)
++C154->(A119 A178)
++C155->(A137 A196)
++C156->(A155 A114)
++C157->(A173 A132)
++C158->(A191 A150)
++C159->(A109 A168)
++C160->(A127 A186)
++C161->(A145 A104)
++C162->(A163 A122)
++C163->(A181 A140)
++C164->(A199 A158)
++C165->(A117 A176)
++C166->(A135 A194)
++C167->(A153 A112)
++C168->(A171 A130)
++C169->(A189 A148)
++C170->(A107 A166)
++C171->(A125 A184)
++C172->(A143 A102)
++C173->(A161 A120)
++C174->(A179 A138)
++C175->(A197 A156)
++C176->(A115 A174)
++C177->(A133 A192)
++C178->(A151 A110)
++C179->(A169 A128)
++C180->(A187 A146)
++C181->(A105 A164)
++C182->(A123 A182)
++C183->(A141 A200)
++C184->(A159 A118)
++C185->(A177 A136)
++C186->(A195 A154)
++C187->(A113 A172)
++C188->(A131 A190)
++C189->(A149 A108)
++C190->(A167 A126)
++C191->(A185 A144)
++C192->(A103 A162)
++C193->(A121 A180)
++C194->(A139 A198)
++C195->(A157 A116)
++C196->(A175 A134)
++C197->(A193 A152)
++C198->(A111 A170)
++C199->(A129 A188)
++C200->(A147 A106)
++M11X11->(M13X14 M12X13 M12X12 M12X11)
++M11X12->(M13X25 M12X24 M12X23 M12X22)
++M11X13->(M13X21 M12X20 M12X19 M12X18)
++M11X14->(M13X17 M12X16 M12X15 M12X14)
++M11X15->(M13X13 M12X12 M12X11 M12X25)
++M11X16->(M13X24 M12X23 M12X22 M12X21)
++M11X17->(M13X20 M12X19 M12X18 M12X17)
++M11X18->(M13X16 M12X15 M12X14 M12X13)
++M11X19->(M13X12 M12X11 M12X25 M12X24)
++M11X20->(M13X23 M12X22 M12X21 M12X20)
++M11X21->(M13X19 M12X18 M12X17 M12X16)
++M11X22->(M13X15 M12X14 M12X13 M12X12)
++M11X23->(M13X11 M12X25 M12X24 M12X23)
++M11X24->(M13X22 M12X21 M12X20 M12X19)
++M11X25->(M13X18 M12X17 M12X16 M12X15)
++M12X11->(M14X14 M13X13 M13X12 M13X11)
++M12X12->(M14X25 M13X24 M13X23 M13X22)
++M12X13->(M14X21 M13X20 M13X19 M13X18)
++M12X14->(M14X17 M13X16 M13X15 M13X14)
++M12X15->(M14X13 M13X12 M13X11 M13X25)
++M12X16->(M14X24 M13X23 M13X22 M13X21)
++M12X17->(M14X20 M13X19 M13X18 M13X17)
++M12X18->(M14X16 M13X15 M13X14 M13X13)
++M12X19->(M14X12 M13X11 M13X25 M13X24)
++M12X20->(M14X23 M13X22 M13X21 M13X20)
++M12X21->(M14X19 M13X18 M13X17 M13X16)
++M12X22->(M14X15 M13X14 M13X13 M13X12)
++M12X23->(M14X11 M13X25 M13X24 M13X23)
++M12X24->(M14X22 M13X21 M13X20 M13X19)
++M12X25->(M14X18 M13X17 M13X16 M13X15)
++M13X11->(M15X14 M14X13 M14X12 M14X11)
++M13X12->(M15X25 M14X24 M14X23 M14X22)
++M13X13->(M15X21 M14X20 M14X19 M14X18)
++M13X14->(M15X17 M14X16 M14X15 M14X14)
++M13X15->(M15X13 M14X12 M14X11 M14X25)
++M13X16->(M15X24 M14X23 M14X22 M14X21)
++M13X17->(M15X20 M14X19 M14X18 M14X17)
++M13X18->(M15X16 M14X15 M14X14 M14X13)
++M13X19->(M15X12 M14X11 M14X25 M14X24)
++M13X20->(M15X23 M14X22 M14X21 M14X20)
++M13X21->(M15X19 M14X18 M14X17 M14X16)
++M13X22->(M15X15 M14X14 M14X13 M14X12)
++M13X23->(M15X11 M14X25 M14X24 M14X23)
++M13X24->(M15X22 M14X21 M14X20 M14X19)
++M13X25->(M15X18 M14X17 M14X16 M14X15)
++M14X11->(M16X14 M15X13 M15X12 M15X11)
++M14X12->(M16X25 M15X24 M15X23 M15X22)
++M14X13->(M16X21 M15X20 M15X19 M15X18)
++M14X14->(M16X17 M15X16 M15X15 M15X14)
++M14X15->(M16X13 M15X12 M15X11 M15X25)
++M14X16->(M16X24 M15X23 M15X22 M15X21)
++M14X17->(M16X20 M15X19 M15X18 M15X17)
++M14X18->(M16X16 M15X15 M15X14 M15X13)
++M14X19->(M16X12 M15X11 M15X25 M15X24)
++M14X20->(M16X23 M15X22 M15X21 M15X20)
++M14X21->(M16X19 M15X18 M15X17 M15X16)
++M14X22->(M16X15 M15X14 M15X13 M15X12)
++M14X23->(M16X11 M15X25 M15X24 M15X23)
++M14X24->(M16X22 M15X21 M15X20 M15X19)
++M14X25->(M16X18 M15X17 M15X16 M15X15)
++M15X11->(M17X14 M16X13 M16X12 M16X11)
++M15X12->(M17X25 M16X24 M16X23 M16X22)
++M15X13->(M17X21 M16X20 M16X19 M16X18)
++M15X14->(M17X17 M16X16 M16X15 M16X14)
++M15X15->(M17X13 M16X12 M16X11 M16X25)
++M15X16->(M17X24 M16X23 M16X22 M16X21)
++M15X17->(M17X20 M16X19 M16X18 M16X17)
++M15X18->(M17X16 M16X15 M16X14 M16X13)
++M15X19->(M17X12 M16X11 M16X25 M16X24)
++M15X20->(M17X23 M16X22 M16X21 M16X20)
++M15X21->(M17X19 M16X18 M16X17 M16X16)
++M15X22->(M17X15 M16X14 M16X13 M16X12)
++M15X23->(M17X11 M16X25 M16X24 M16X23)
++M15X24->(M17X22 M16X21 M16X20 M16X19)
++M15X25->(M17X18 M16X17 M16X16 M16X15)
++M16X11->(M18X14 M17X13 M17X12 M17X11)
++M16X12->(M18X25 M17X24 M17X23 M17X22)
++M16X13->(M18X21 M17X20 M17X19 M17X18)
++M16X14->(M18X17 M17X16 M17X15 M17X14)
++M16X15->(M18X13 M17X12 M17X11 M17X25)
++M16X16->(M18X24 M17X23 M17X22 M17X21)
++M16X17->(M18X20 M17X19 M17X18 M17X17)
++M16X18->(M18X16 M17X15 M17X14 M17X13)
++M16X19->(M18X12 M17X11 M17X25 M17X24)
++M16X20->(M18X23 M17X22 M17X21 M17X20)
++M16X21->(M18X19 M17X18 M17X17 M17X16)
++M16X22->(M18X15 M17X14 M17X13 M17X12)
++M16X23->(M18X11 M17X25 M17X24 M17X23)
++M16X24->(M18X22 M17X21 M17X20 M17X19)
++M16X25->(M18X18 M17X17 M17X16 M17X15)
++M17X11->(M19X14 M18X13 M18X12 M18X11)
++M17X12->(M19X25 M18X24 M18X23 M18X22)
++M17X13->(M19X21 M18X20 M18X19 M18X18)
++M17X14->(M19X17 M18X16 M18X15 M18X14)
++M17X15->(M19X13 M18X12 M18X11 M18X25)
++M17X16->(M19X24 M18X23 M18X22 M18X21)
++M17X17->(M19X20 M18X19 M18X18 M18X17)
++M17X18->(M19X16 M18X15 M18X14 M18X13)
++M17X19->(M19X12 M18X11 M18X25 M18X24)
++M17X20->(M19X23 M18X22 M18X21 M18X20)
++M17X21->(M19X19 M18X18 M18X17 M18X16)
++M17X22->(M19X15 M18X14 M18X13 M18X12)
++M17X23->(M19X11 M18X25 M18X24 M18X23)
++M17X24->(M19X22 M18X21 M18X20 M18X19)
++M17X25->(M19X18 M18X17 M18X16 M18X15)
++M18X11->(M20X14 M19X13 M19X12 M19X11)
++M18X12->(M20X25 M19X24 M19X23 M19X22)
++M18X13->(M20X21 M19X20 M19X19 M19X18)
++M18X14->(M20X17 M19X16 M19X15 M19X14)
++M18X15->(M20X13 M19X12 M19X11 M19X25)
++M18X16->(M20X24 M19X23 M19X22 M19X21)
++M18X17->(M20X20 M19X19 M19X18 M19X17)
++M18X18->(M20X16 M19X15 M19X14 M19X13)
++M18X19->(M20X12 M19X11 M19X25 M19X24)
++M18X20->(M20X23 M19X22 M19X21 M19X20)
++M18X21->(M20X19 M19X18 M19X17 M19X16)
++M18X22->(M20X15 M19X14 M19X13 M19X12)
++M18X23->(M20X11 M19X25 M19X24 M19X23)
++M18X24->(M20X22 M19X21 M19X20 M19X19)
++M18X25->(M20X18 M19X17 M19X16 M19X15)
++M19X11->(M21X14 M20X13 M20X12 M20X11)
++M19X12->(M21X25 M20X24 M20X23 M20X22)
++M19X13->(M21X21 M20X20 M20X19 M20X18)
++M19X14->(M21X17 M20X16 M20X15 M20X14)
++M19X15->(M21X13 M20X12 M20X11 M20X25)
++M19X16->(M21X24 M20X23 M20X22 M20X21)
++M19X17->(M21X20 M20X19 M20X18 M20X17)
++M19X18->(M21X16 M20X15 M20X14 M20X13)
++M19X19->(M21X12 M20X11 M20X25 M20X24)
++M19X20->(M21X23 M20X22 M20X21 M20X20)
++M19X21->(M21X19 M20X18 M20X17 M20X16)
++M19X22->(M21X15 M20X14 M20X13 M20X12)
++M19X23->(M21X11 M20X25 M20X24 M20X23)
++M19X24->(M21X22 M20X21 M20X20 M20X19)
++M19X25->(M21X18 M20X17 M20X16 M20X15)
++M20X11->(M22X14 M21X13 M21X12 M21X11)
++M20X12->(M22X25 M21X24 M21X23 M21X22)
++M20X13->(M22X21 M21X20 M21X19 M21X18)
++M20X14->(M22X17 M21X16 M21X15 M21X14)
++M20X15->(M22X13 M21X12 M21X11 M21X25)
++M20X16->(M22X24 M21X23 M21X22 M21X21)
++M20X17->(M22X20 M21X19 M21X18 M21X17)
++M20X18->(M22X16 M21X15 M21X14 M21X13)
++M20X19->(M22X12 M21X11 M21X25 M21X24)
++M20X20->(M22X23 M21X22 M21X21 M21X20)
++M20X21->(M22X19 M21X18 M21X17 M21X16)
++M20X22->(M22X15 M21X14 M21X13 M21X12)
++M20X23->(M22X11 M21X25 M21X24 M21X23)
++M20X24->(M22X22 M21X21 M21X20 M21X19)
++M20X25->(M22X18 M21X17 M21X16 M21X15)
++M21X11->(M23X15 M22X14 M22X13 M22X12)
++M21X12->(M11X11 M23X25 M22X24 M22X23 M22X22)
++M21X13->(M23X21 M22X20 M22X19 M22X18)
++M21X14->(M23X17 M22X16 M22X15 M22X14)
++M21X15->(M23X13 M22X12 M22X11 M22X25)
++M21X16->(M23X24 M22X23 M22X22 M22X21)
++M21X17->(M23X20 M22X19 M22X18 M22X17)
++M21X18->(M23X16 M22X15 M22X14 M22X13)
++M21X19->(M23X12 M22X11 M22X25 M22X24)
++M21X20->(M23X23 M22X22 M22X21 M22X20)
++M21X21->(M23X19 M22X18 M22X17 M22X16)
++M21X22->(M23X15 M22X14 M22X13 M22X12)
++M21X23->(M23X11 M22X25 M22X24 M22X23)
++M21X24->(M23X22 M22X21 M22X20 M22X19)
++M21X25->(M23X18 M22X17 M22X16 M22X15)
++M22X11->(M24X16 M23X15 M23X14 M23X13)
++M22X12->(M12X12 M24X11 M23X25 M23X24 M23X23)
++M22X13->(M24X22 M23X21 M23X20 M23X19)
++M22X14->(M24X18 M23X17 M23X16 M23X15)
++M22X15->(M24X14 M23X13 M23X12 M23X11)
++M22X16->(M24X25 M23X24 M23X23 M23X22)
++M22X17->(M24X21 M23X20 M23X19 M23X18)
++M22X18->(M24X17 M23X16 M23X15 M23X14)
++M22X19->(M24X13 M23X12 M23X11 M23X25)
++M22X20->(M24X24 M23X23 M23X22 M23X21)
++M22X21->(M24X20 M23X19 M23X18 M23X17)
++M22X22->(M24X16 M23X15 M23X14 M23X13)
++M22X23->(M24X12 M23X11 M23X25 M23X24)
++M22X24->(M24X23 M23X22 M23X21 M23X20)
++M22X25->(M24X19 M23X18 M23X17 M23X16)
++M23X11->(M25X17 M24X16 M24X15 M24X14)
++M23X12->(M13X13 M25X12 M24X11 M24X25 M24X24)
++M23X13->(M25X23 M24X22 M24X21 M24X20)
++M23X14->(M25X19 M24X18 M24X17 M24X16)
++M23X15->(M25X15 M24X14 M24X13 M24X12)
++M23X16->(M25X11 M24X25 M24X24 M24X23)
++M23X17->(M25X22 M24X21 M24X20 M24X19)
++M23X18->(M25X18 M24X17 M24X16 M24X15)
++M23X19->(M25X14 M24X13 M24X12 M24X11)
++M23X20->(M25X25 M24X24 M24X23 M24X22)
++M23X21->(M25X21 M24X20 M24X19 M24X18)
++M23X22->(M25X17 M24X16 M24X15 M24X14)
++M23X23->(M25X13 M24X12 M24X11 M24X25)
++M23X24->(M25X24 M24X23 M24X22 M24X21)
++M23X25->(M25X20 M24X19 M24X18 M24X17)
++M24X11->(M26X18 M25X17 M25X16 M25X15)
++M24X12->(M14X14 M26X13 M25X12 M25X11 M25X25)
++M24X13->(M26X24 M25X23 M25X22 M25X21)
++M24X14->(M26X20 M25X19 M25X18 M25X17)
++M24X15->(M26X16 M25X15 M25X14 M25X13)
++M24X16->(M26X12 M25X11 M25X25 M25X24)
++M24X17->(M26X23 M25X22 M25X21 M25X20)
++M24X18->(M26X19 M25X18 M25X17 M25X16)
++M24X19->(M26X15 M25X14 M25X13 M25X12)
++M24X20->(M26X11 M25X25 M25X24 M25X23)
++M24X21->(M26X22 M25X21 M25X20 M25X19)
++M24X22->(M26X18 M25X17 M25X16 M25X15)
++M24X23->(M26X14 M25X13 M25X12 M25X11)
++M24X24->(M26X25 M25X24 M25X23 M25X22)
++M24X25->(M26X21 M25X20 M25X19 M25X18)
++M25X11->(M27X19 M26X18 M26X17 M26X16)
++M25X12->(M15X15 M27X14 M26X13 M26X12 M26X11)
++M25X13->(M27X25 M26X24 M26X23 M26X22)
++M25X14->(M27X21 M26X20 M26X19 M26X18)
++M25X15->(M27X17 M26X16 M26X15 M26X14)
++M25X16->(M27X13 M26X12 M26X11 M26X25)
++M25X17->(M27X24 M26X23 M26X22 M26X21)
++M25X18->(M27X20 M26X19 M26X18 M26X17)
++M25X19->(M27X16 M26X15 M26X14 M26X13)
++M25X20->(M27X12 M26X11 M26X25 M26X24)
++M25X21->(M27X23 M26X22 M26X21 M26X20)
++M25X22->(M27X19 M26X18 M26X17 M26X16)
++M25X23->(M27X15 M26X14 M26X13 M26X12)
++M25X24->(M27X11 M26X25 M26X24 M26X23)
++M25X25->(M27X22 M26X21 M26X20 M26X19)
++M26X11->(M28X20 M27X19 M27X18 M27X17)
++M26X12->(M16X16 M28X15 M27X14 M27X13 M27X12)
++M26X13->(M28X11 M27X25 M27X24 M27X23)
++M26X14->(M28X22 M27X21 M27X20 M27X19)
++M26X15->(M28X18 M27X17 M27X16 M27X15)
++M26X16->(M28X14 M27X13 M27X12 M27X11)
++M26X17->(M28X25 M27X24 M27X23 M27X22)
++M26X18->(M28X21 M27X20 M27X19 M27X18)
++M26X19->(M28X17 M27X16 M27X15 M27X14)
++M26X20->(M28X13 M27X12 M27X11 M27X25)
++M26X21->(M28X24 M27X23 M27X22 M27X21)
++M26X22->(M28X20 M27X19 M27X18 M27X17)
++M26X23->(M28X16 M27X15 M27X14 M27X13)
++M26X24->(M28X12 M27X11 M27X25 M27X24)
++M26X25->(M28X23 M27X22 M27X21 M27X20)
++M27X11->(M29X21 M28X20 M28X19 M28X18)
++M27X12->(M17X17 M29X16 M28X15 M28X14 M28X13)
++M27X13->(M29X12 M28X11 M28X25 M28X24)
++M27X14->(M29X23 M28X22 M28X21 M28X20)
++M27X15->(M29X19 M28X18 M28X17 M28X16)
++M27X16->(M29X15 M28X14 M28X13 M28X12)
++M27X17->(M29X11 M28X25 M28X24 M28X23)
++M27X18->(M29X22 M28X21 M28X20 M28X19)
++M27X19->(M29X18 M28X17 M28X16 M28X15)
++M27X20->(M29X14 M28X13 M28X12 M28X11)
++M27X21->(M29X25 M28X24 M28X23 M28X22)
++M27X22->(M29X21 M28X20 M28X19 M28X18)
++M27X23->(M29X17 M28X16 M28X15 M28X14)
++M27X24->(M29X13 M28X12 M28X11 M28X25)
++M27X25->(M29X24 M28X23 M28X22 M28X21)
++M28X11->(M30X22 M29X21 M29X20 M29X19)
++M28X12->(M18X18 M30X17 M29X16 M29X15 M29X14)
++M28X13->(M30X13 M29X12 M29X11 M29X25)
++M28X14->(M30X24 M29X23 M29X22 M29X21)
++M28X15->(M30X20 M29X19 M29X18 M29X17)
++M28X16->(M30X16 M29X15 M29X14 M29X13)
++M28X17->(M30X12 M29X11 M29X25 M29X24)
++M28X18->(M30X23 M29X22 M29X21 M29X20)
++M28X19->(M30X19 M29X18 M29X17 M29X16)
++M28X20->(M30X15 M29X14 M29X13 M29X12)
++M28X21->(M30X11 M29X25 M29X24 M29X23)
++M28X22->(M30X22 M29X21 M29X20 M29X19)
++M28X23->(M30X18 M29X17 M29X16 M29X15)
++M28X24->(M30X14 M29X13 M29X12 M29X11)
++M28X25->(M30X25 M29X24 M29X23 M29X22)
++M29X11->(M30X22 M30X21 M30X20)
++M29X12->(M30X17 M30X16 M30X15)
++M29X13->(M30X13 M30X12 M30X11)
++M29X14->(M30X24 M30X23 M30X22)
++M29X15->(M30X20 M30X19 M30X18)
++M29X16->(M30X16 M30X15 M30X14)
++M29X17->(M30X12 M30X11 M30X25)
++M29X18->(M30X23 M30X22 M30X21)
++M29X19->(M30X19 M30X18 M30X17)
++M29X20->(M30X15 M30X14 M30X13)
++M29X21->(M30X11 M30X25 M30X24)
++M29X22->(M30X22 M30X21 M30X20)
++M29X23->(M30X18 M30X17 M30X16)
++M29X24->(M30X14 M30X13 M30X12)
++M29X25->(M30X25 M30X24 M30X23)
++M30X11
++M30X12
++M30X13
++M30X14
++M30X15
++M30X16
++M30X17
++M30X18
++M30X19
++M30X20
++M30X21
++M30X22
++M30X23
++M30X24
++M30X25
++xfail_output(glibc.rtld.dynamic_sort=1): M30X19>M30X15>M30X16>M30X11>M30X12>M30X17>M30X13>M30X14>M29X20>M30X23>M30X24>M30X20>M30X18>M29X15>M29X12>M30X22>M30X21>M29X22>M30X25>M29X19>M29X23>M29X16>M29X24>M29X13>M29X17>M29X18>M28X19>M29X21>M29X25>M29X14>M28X20>M28X15>M28X16>M28X21>M27X18>M29X11>M28X17>M28X11>M28X22>M27X14>M28X18>M27X15>M28X13>M27X11>M28X23>M27X25>M28X14>M28X25>M27X23>M27X22>M28X24>M27X21>M27X13>M27X19>M27X17>M26X11>M26X23>M26X21>M26X22>M26X20>M26X16>M25X21>M17X22>M15X15>M20X14>M20X16>M18X18>M28X12>M27X24>M25X17>M27X20>M26X18>M26X17>M27X16>M26X19>M25X18>M26X24>M25X20>M24X17>M23X18>M25X13>M26X13>M17X23>M16X16>M26X12>M25X12>M26X15>M24X19>M25X23>M25X24>M25X25>M24X20>M25X19>M24X21>M23X17>M22X21>M24X14>M23X22>M24X24>M22X20>M24X13>M25X11>M24X12>M25X15>M23X15>M25X16>M24X22>M23X13>M24X18>M23X14>M22X22>M21X20>M24X25>M23X16>M22X25>M21X19>M22X14>M23X11>M22X15>M21X18>M22X19>M21X17>M20X17>M19X17>M21X24>M21X12>M20X22>M19X16>M18X25>M19X21>M19X20>M18X24>M20X12>M19X11>M23X20>M22X24>M22X16>M21X21>M25X14>M23X19>M23X24>M20X24>M19X12>M18X15>M17X14>M16X18>M14X25>M16X22>M16X20>M17X17>M22X12>M21X11>M20X15>M18X22>M19X24>M19X18>M18X21>M17X16>M17X18>M16X21>M15X20>M19X22>M18X20>M18X11>M17X19>M16X17>M15X21>M16X14>M16X13>M15X22>M14X20>M17X25>M16X19>M14X21>M13X24>M12X12>M16X24>M15X23>M14X16>M16X15>M15X25>M15X11>M15X12>M14X15>M13X14>M14X22>M13X20>M12X13>M11X11>M22X23>M21X15>M21X16>M20X21>M20X20>M18X17>M19X25>M18X23>M21X13>M15X17>M15X18>M18X19>M17X24>M16X12>M17X13>M20X25>M19X23>M15X19>M14X13>M13X18>M15X13>M17X12>M16X11>M18X13>M18X12>M14X11>M14X24>M13X19>M15X14>M17X20>M20X11>M20X13>M21X14>M15X24>M14X12>M13X22>M14X23>M13X23>M14X19>M17X15>M16X25>M17X11>M18X14>M19X19>M21X25>M13X12>M13X11>M14X18>M13X13>M12X11>M15X16>M14X14>M27X12>M17X21>M20X23>M22X13>M21X22>M24X16>M24X15>M26X25>M23X25>M26X14>M23X12>M22X18>M24X11>M16X23>M19X14>M19X13>M21X23>M22X17>M23X23>M23X21>M25X22>M18X16>M19X15>M20X18>M20X19>M22X11>M24X23>C156>C118>C143>C137>C147>C106>C168>C113>C163>C155>C105>C146>C187>A150>C139>C180>C164>C193>C157>A191>C158>B188>A159>C184>C121>C154>B171>A105>C131>C104>B104>C161>C111>B145>C160>B155>A163>C112>C142>B148>C133>B198>A198>A115>C114>B157>A156>C175>B144>A120>C173>B184>A174>C126>B107>A139>C194>B194>A194>C116>B116>C166>B160>B110>A110>C128>B128>A128>C179>B162>A154>C186>B187>A179>C124>B181>A101>C153>B158>A136>C135>C176>A192>B133>A133>C177>B177>A177>C185>C103>B141>A141>C183>A162>C192>C129>B179>C144>B124>B183>C127>B127>A127>B108>A112>B153>A153>C167>B167>A186>A122>C162>A144>B149>C174>B131>A185>C141>B106>A126>A167>C140>B122>A170>C198>B143>C117>C123>B123>A147>A106>C200>B169>C191>B175>A123>B118>A182>C132>B151>A145>A104>A109>C159>C150>B119>A119>A178>B164>B114>A164>C181>A102>C122>B134>A157>A116>C195>B191>B111>C172>B172>A118>B129>A129>C149>A107>C170>B197>A197>A173>B168>A132>C107>B165>A160>A131>C188>A168>B109>C178>A189>A148>C119>C190>C120>B166>B176>C108>B135>B139>A103>B178>A169>B132>C125>C138>B163>A111>B170>C110>A165>C151>C169>C199>A138>C182>A135>B101>B142>C101>C148>B193>B152>A158>A199>C136>B137>A161>B120>A108>A149>A125>B113>A184>C171>A134>A175>A124>B150>B161>B102>A146>A187>C130>B192>B200>A200>A142>A183>C102>B105>B156>A176>C165>B147>A137>A196>B190>A190>B125>C134>C189>B126>B186>A166>B136>B195>A195>B154>B138>B112>B173>A117>B159>B182>A181>A140>C145>B117>A152>A193>C197>B130>A172>A113>A151>B115>A143>B140>B185>B103>A121>A180>A130>A171>B199>C196>B146>B180>C115>B174>B121>A188>B196>B189>C152>C109>A155>A114>M14X17>M13X15>M13X16>M13X17>M12X17>M12X21>M12X25>M12X14>M13X25>M12X15>M13X21>M12X16>M12X18>M12X19>M12X20>M12X22>M12X23>M12X24>M11X25>M11
X24>M11X23>M11X22>M11X21>M11X20>M11X19>M11X18>M11X17>M11X16>M11X15>M11X14>M11X13>M11X12>{}<M11X12<M11X13<M11X14<M11X15<M11X16<M11X17<M11X18<M11X19<M11X20<M11X21<M11X22<M11X23<M11X24<M11X25<M12X24<M12X23<M12X22<M12X20<M12X19<M12X18<M12X16<M13X21<M12X15<M13X25<M12X14<M12X25<M12X21<M12X17<M13X17<M13X16<M13X15<M14X17<A114<A155<C109<C152<B189<B196<A188<B121<B174<C115<B180<B146<C196<B199<A171<A130<A180<A121<B103<B185<B140<A143<B115<A151<A113<A172<B130<C197<A193<A152<B117<C145<A140<A181<B182<B159<A117<B173<B112<B138<B154<A195<B195<B136<A166<B186<B126<C189<C134<B125<A190<B190<A196<A137<B147<C165<A176<B156<B105<C102<A183<A142<A200<B200<B192<C130<A187<A146<B102<B161<B150<A124<A175<A134<C171<A184<B113<A125<A149<A108<B120<A161<B137<C136<A199<A158<B152<B193<C148<C101<B142<B101<A135<C182<A138<C199<C169<C151<A165<C110<B170<A111<B163<C138<C125<B132<A169<B178<A103<B139<B135<C108<B176<B166<C120<C190<C119<A148<A189<C178<B109<A168<C188<A131<A160<B165<C107<A132<B168<A173<A197<B197<C170<A107<C149<A129<B129<A118<B172<C172<B111<B191<C195<A116<A157<B134<C122<A102<C181<A164<B114<B164<A178<A119<B119<C150<C159<A109<A104<A145<B151<C132<A182<B118<A123<B175<C191<B169<C200<A106<A147<B123<C123<C117<B143<C198<A170<B122<C140<A167<A126<B106<C141<A185<B131<C174<B149<A144<C162<A122<A186<B167<C167<A153<B153<A112<B108<A127<B127<C127<B183<B124<C144<B179<C129<C192<A162<C183<A141<B141<C103<C185<A177<B177<C177<A133<B133<A192<C176<C135<A136<B158<C153<A101<B181<C124<A179<B187<C186<A154<B162<C179<A128<B128<C128<A110<B110<B160<C166<B116<C116<A194<B194<C194<A139<B107<C126<A174<B184<C173<A120<B144<C175<A156<B157<C114<A115<A198<B198<C133<B148<C142<C112<A163<B155<C160<B145<C111<C161<B104<C104<C131<A105<B171<C154<C121<C184<A159<B188<C158<A191<C157<C193<C164<C180<C139<A150<C187<C146<C105<C155<C163<C113<C168<C106<C147<C137<C143<C118<C156<M24X23<M22X11<M20X19<M20X18<M19X15<M18X16<M25X22<M23X21<M23X23<M22X17<M21X23<M19X13<M19X14<M16X23<M24X11<M22X18<M23X12<M26X14<M23X25<M26X25<M24X15<M24X16<M21X22<M22X13<M20X23<M17X21<M27X12<M14X14<M15X16<M12X11<M13X13<M14X18<M13X11<M13X12<M21X25<M19X19<M18X14<M17X11<M16X25<M17X15<M14X19<M13X23<M14X23<M13X22<M14X12<M15X24<M21X14<M20X13<M20X11<M17X20<M15X14<M13X19<M14X24<M14X11<M18X12<M18X13<M16X11<M17X12<M15X13<M13X18<M14X13<M15X19<M19X23<M20X25<M17X13<M16X12<M17X24<M18X19<M15X18<M15X17<M21X13<M18X23<M19X25<M18X17<M20X20<M20X21<M21X16<M21X15<M22X23<M11X11<M12X13<M13X20<M14X22<M13X14<M14X15<M15X12<M15X11<M15X25<M16X15<M14X16<M15X23<M16X24<M12X12<M13X24<M14X21<M16X19<M17X25<M14X20<M15X22<M16X13<M16X14<M15X21<M16X17<M17X19<M18X11<M18X20<M19X22<M15X20<M16X21<M17X18<M17X16<M18X21<M19X18<M19X24<M18X22<M20X15<M21X11<M22X12<M17X17<M16X20<M16X22<M14X25<M16X18<M17X14<M18X15<M19X12<M20X24<M23X24<M23X19<M25X14<M21X21<M22X16<M22X24<M23X20<M19X11<M20X12<M18X24<M19X20<M19X21<M18X25<M19X16<M20X22<M21X12<M21X24<M19X17<M20X17<M21X17<M22X19<M21X18<M22X15<M23X11<M22X14<M21X19<M22X25<M23X16<M24X25<M21X20<M22X22<M23X14<M24X18<M23X13<M24X22<M25X16<M23X15<M25X15<M24X12<M25X11<M24X13<M22X20<M24X24<M23X22<M24X14<M22X21<M23X17<M24X21<M25X19<M24X20<M25X25<M25X24<M25X23<M24X19<M26X15<M25X12<M26X12<M16X16<M17X23<M26X13<M25X13<M23X18<M24X17<M25X20<M26X24<M25X18<M26X19<M27X16<M26X17<M26X18<M27X20<M25X17<M27X24<M28X12<M18X18<M20X16<M20X14<M15X15<M17X22<M25X21<M26X16<M26X20<M26X22<M26X21<M26X23<M26X11<M27X17<M27X19<M27X13<M27X21<M28X24<M27X22<M27X23<M28X25<M28X14<M27X25<M28X23<M27X11<M28X13<M27X15<M28X18<M27X14<M28X22<M28X11<M28X17<M29X11<M27X18<M28X21<M28X16<M28X15<M28X20<M29X14<M29X25<M29X21<M28X19<M29X18<M29X17<M29X13<M29X24<M29X16<M29X23<M29X
19<M30X25<M29X22<M30X21<M30X22<M29X12<M29X15<M30X18<M30X20<M30X24<M30X23<M29X20<M30X14<M30X13<M30X17<M30X12<M30X11<M30X16<M30X15<M30X19
++output(glibc.rtld.dynamic_sort=2): M30X19>M30X15>M30X16>M30X11>M30X12>M30X17>M30X13>M30X14>M29X20>M30X23>M30X24>M30X20>M30X18>M29X15>M29X12>M30X22>M30X21>M29X22>M30X25>M29X19>M29X23>M29X16>M29X24>M29X13>M29X17>M29X18>M28X19>M29X21>M29X25>M29X14>M28X20>M28X15>M28X16>M28X21>M27X18>M29X11>M28X17>M28X11>M28X22>M28X24>M28X23>M27X21>M28X13>M27X20>M27X19>M26X14>M27X25>M28X18>M27X11>M28X25>M27X24>M26X24>M27X15>M27X14>M27X13>M26X23>M27X17>M26X22>M25X13>M28X14>M27X16>M26X19>M26X18>M27X23>M27X22>M26X17>M25X18>M26X21>M25X17>M26X20>M26X15>M26X13>M25X19>M24X14>M25X23>M26X11>M26X25>M25X16>M25X15>M24X22>M25X21>M25X20>M24X21>M25X25>M25X24>M24X20>M23X13>M22X15>M25X14>M24X19>M23X17>M24X25>M23X24>M24X13>M23X15>M24X18>M23X14>M22X11>M24X15>M23X22>M24X11>M23X19>M22X21>M24X24>M23X21>M22X20>M23X25>M22X19>M21X24>M20X23>M22X22>M25X11>M23X16>M22X18>M23X20>M22X17>M21X21>M21X20>M20X24>M22X14>M22X13>M21X11>M21X17>M22X23>M21X16>M20X25>M19X23>M18X16>M21X22>M20X20>M20X19>M21X13>M20X18>M19X13>M21X18>M20X21>M19X24>M18X12>M20X14>M20X13>M22X25>M20X12>M20X15>M19X14>M18X22>M19X18>M20X17>M19X17>M19X16>M18X21>M17X20>M19X19>M18X13>M17X11>M18X17>M19X25>M18X15>M17X25>M18X19>M17X24>M16X19>M15X17>M17X21>M16X24>M18X23>M17X16>M16X25>M19X15>M18X25>M17X23>M16X23>M15X23>M18X14>M17X14>M16X14>M17X18>M16X13>M17X22>M16X12>M15X22>M14X16>M17X12>M16X22>M15X12>M16X11>M15X11>M16X15>M15X25>M14X15>M13X14>M15X18>M16X21>M15X16>M14X21>M15X14>M16X20>M15X13>M14X22>M15X20>M14X20>M13X20>M14X11>M15X19>M14X24>M13X19>M14X13>M13X18>M12X13>M15X24>M14X23>M13X12>M14X12>M13X11>M12X11>M11X11>M21X12>M20X11>M19X11>M18X11>M17X15>M16X18>M14X25>M14X19>M13X24>M13X23>M13X22>M12X12>M22X12>M21X15>M19X22>M18X20>M16X17>M14X14>M24X12>M23X23>M22X16>M21X14>M20X22>M18X24>M16X16>M26X12>M24X16>M23X11>M21X23>M19X20>M17X17>M27X12>M26X16>M25X22>M24X17>M23X18>M21X25>M19X12>M17X19>M15X21>M14X18>M13X13>M23X12>M21X19>M19X21>M17X13>M15X15>M25X12>M24X23>M22X24>M20X16>M18X18>M28X12>A150>C158>B112>A112>C167>B146>A146>C180>B180>A180>C143>B143>A115>C126>B126>A126>C190>B190>A190>C138>B138>A138>C174>B174>A102>C122>B122>A122>C162>B162>A162>C142>B142>A142>C102>B102>A174>C176>B176>A176>C115>B115>A143>C172>B172>A172>C187>B187>A187>C130>B130>A130>C118>B118>A118>C184>B184>A184>C171>B171>A171>C168>B182>A182>C182>B168>A168>C109>B109>A109>C159>B159>A159>C134>B134>A134>C146>B167>A167>C140>B140>A140>C163>B163>A163>C112>B158>A158>C164>B164>A164>C131>B131>A131>C188>B188>A188>C199>B199>A199>C114>B114>A114>C106>B106>A106>C200>B200>A200>C183>B183>A183>C152>B152>A152>C147>B147>A147>C150>B150>A198>C144>B144>A144>C191>B191>A191>C108>B108>A108>C139>B139>A139>C194>B194>A194>C166>B166>A166>C120>B120>A120>C123>B123>A123>C132>B132>A132>C107>B107>A107>C170>B170>A170>C198>B198>A156>C125>B125>A125>C121>B121>A121>C193>B193>A193>C197>B197>A197>C175>B175>A175>C196>B196>A196>C105>B105>A105>C181>B181>A181>C113>B113>A113>C137>B137>A137>C155>B155>A155>C156>B156>A110>C128>B128>A128>C179>B179>A179>C124>B124>A124>C151>B151>A151>C178>B178>A178>C104>B104>A104>C111>B111>A111>C148>B148>A148>C169>B169>A169>C129>B129>A129>C149>B149>A149>C189>B189>A189>C119>B119>A119>C154>B154>A154>C136>B136>A136>C135>B135>A135>C116>B116>A116>C145>B145>A145>C161>B161>A161>C173>B173>A173>C157>B157>A157>C195>B195>A195>C186>B186>A186>C160>B160>A160>C153>B153>A153>C117>B117>A117>C165>B165>A165>C101>B101>A101>C103>B103>A103>C192>B192>A192>C177>B177>A177>C185>B185>A185>C141>B141>A141>C133>B133>A133>C127>B127>A127>C110>B110>M14X17>M13X15>M13X16>M13X17>M12X17>M12X21>M12X25>M12X14>M13X25>M12X15>M13X21>M12X16>M12X18>M12X19>M12X20>M12X22>M12X23>M12X24>M11X25>M11X24>M1
1X23>M11X22>M11X21>M11X20>M11X19>M11X18>M11X17>M11X16>M11X15>M11X14>M11X13>M11X12>{}<M11X12<M11X13<M11X14<M11X15<M11X16<M11X17<M11X18<M11X19<M11X20<M11X21<M11X22<M11X23<M11X24<M11X25<M12X24<M12X23<M12X22<M12X20<M12X19<M12X18<M12X16<M13X21<M12X15<M13X25<M12X14<M12X25<M12X21<M12X17<M13X17<M13X16<M13X15<M14X17<B110<C110<A127<B127<C127<A133<B133<C133<A141<B141<C141<A185<B185<C185<A177<B177<C177<A192<B192<C192<A103<B103<C103<A101<B101<C101<A165<B165<C165<A117<B117<C117<A153<B153<C153<A160<B160<C160<A186<B186<C186<A195<B195<C195<A157<B157<C157<A173<B173<C173<A161<B161<C161<A145<B145<C145<A116<B116<C116<A135<B135<C135<A136<B136<C136<A154<B154<C154<A119<B119<C119<A189<B189<C189<A149<B149<C149<A129<B129<C129<A169<B169<C169<A148<B148<C148<A111<B111<C111<A104<B104<C104<A178<B178<C178<A151<B151<C151<A124<B124<C124<A179<B179<C179<A128<B128<C128<A110<B156<C156<A155<B155<C155<A137<B137<C137<A113<B113<C113<A181<B181<C181<A105<B105<C105<A196<B196<C196<A175<B175<C175<A197<B197<C197<A193<B193<C193<A121<B121<C121<A125<B125<C125<A156<B198<C198<A170<B170<C170<A107<B107<C107<A132<B132<C132<A123<B123<C123<A120<B120<C120<A166<B166<C166<A194<B194<C194<A139<B139<C139<A108<B108<C108<A191<B191<C191<A144<B144<C144<A198<B150<C150<A147<B147<C147<A152<B152<C152<A183<B183<C183<A200<B200<C200<A106<B106<C106<A114<B114<C114<A199<B199<C199<A188<B188<C188<A131<B131<C131<A164<B164<C164<A158<B158<C112<A163<B163<C163<A140<B140<C140<A167<B167<C146<A134<B134<C134<A159<B159<C159<A109<B109<C109<A168<B168<C182<A182<B182<C168<A171<B171<C171<A184<B184<C184<A118<B118<C118<A130<B130<C130<A187<B187<C187<A172<B172<C172<A143<B115<C115<A176<B176<C176<A174<B102<C102<A142<B142<C142<A162<B162<C162<A122<B122<C122<A102<B174<C174<A138<B138<C138<A190<B190<C190<A126<B126<C126<A115<B143<C143<A180<B180<C180<A146<B146<C167<A112<B112<C158<A150<M28X12<M18X18<M20X16<M22X24<M24X23<M25X12<M15X15<M17X13<M19X21<M21X19<M23X12<M13X13<M14X18<M15X21<M17X19<M19X12<M21X25<M23X18<M24X17<M25X22<M26X16<M27X12<M17X17<M19X20<M21X23<M23X11<M24X16<M26X12<M16X16<M18X24<M20X22<M21X14<M22X16<M23X23<M24X12<M14X14<M16X17<M18X20<M19X22<M21X15<M22X12<M12X12<M13X22<M13X23<M13X24<M14X19<M14X25<M16X18<M17X15<M18X11<M19X11<M20X11<M21X12<M11X11<M12X11<M13X11<M14X12<M13X12<M14X23<M15X24<M12X13<M13X18<M14X13<M13X19<M14X24<M15X19<M14X11<M13X20<M14X20<M15X20<M14X22<M15X13<M16X20<M15X14<M14X21<M15X16<M16X21<M15X18<M13X14<M14X15<M15X25<M16X15<M15X11<M16X11<M15X12<M16X22<M17X12<M14X16<M15X22<M16X12<M17X22<M16X13<M17X18<M16X14<M17X14<M18X14<M15X23<M16X23<M17X23<M18X25<M19X15<M16X25<M17X16<M18X23<M16X24<M17X21<M15X17<M16X19<M17X24<M18X19<M17X25<M18X15<M19X25<M18X17<M17X11<M18X13<M19X19<M17X20<M18X21<M19X16<M19X17<M20X17<M19X18<M18X22<M19X14<M20X15<M20X12<M22X25<M20X13<M20X14<M18X12<M19X24<M20X21<M21X18<M19X13<M20X18<M21X13<M20X19<M20X20<M21X22<M18X16<M19X23<M20X25<M21X16<M22X23<M21X17<M21X11<M22X13<M22X14<M20X24<M21X20<M21X21<M22X17<M23X20<M22X18<M23X16<M25X11<M22X22<M20X23<M21X24<M22X19<M23X25<M22X20<M23X21<M24X24<M22X21<M23X19<M24X11<M23X22<M24X15<M22X11<M23X14<M24X18<M23X15<M24X13<M23X24<M24X25<M23X17<M24X19<M25X14<M22X15<M23X13<M24X20<M25X24<M25X25<M24X21<M25X20<M25X21<M24X22<M25X15<M25X16<M26X25<M26X11<M25X23<M24X14<M25X19<M26X13<M26X15<M26X20<M25X17<M26X21<M25X18<M26X17<M27X22<M27X23<M26X18<M26X19<M27X16<M28X14<M25X13<M26X22<M27X17<M26X23<M27X13<M27X14<M27X15<M26X24<M27X24<M28X25<M27X11<M28X18<M27X25<M26X14<M27X19<M27X20<M28X13<M27X21<M28X23<M28X24<M28X22<M28X11<M28X17<M29X11<M27X18<M28X21<M28X16<M28X15<M28X20<M29X14<M29X25<M29X21<M28X19<M29X18<M29X17<M29X13<M29X24<M29X16<M29X23<M29X19<M30
X25<M29X22<M30X21<M30X22<M29X12<M29X15<M30X18<M30X20<M30X24<M30X23<M29X20<M30X14<M30X13<M30X17<M30X12<M30X11<M30X16<M30X15<M30X19
+diff --git a/scripts/dso-ordering-test.py b/scripts/dso-ordering-test.py
+new file mode 100644
+index 0000000000000000..944ee740527d60fd
+--- /dev/null
++++ b/scripts/dso-ordering-test.py
+@@ -0,0 +1,1144 @@
++#!/usr/bin/python3
++# Generate testcase files and Makefile fragments for DSO sorting test
++# Copyright (C) 2021 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++"""Generate testcase files and Makefile fragments for DSO sorting test
++
++This script takes a small description string language, and generates
++testcases for displaying the ELF dynamic linker's dependency sorting
++behavior, allowing verification.
++
++Testcase descriptions are semicolon-separated description strings, and
++this tool generates a testcase from the description, including main program,
++associated modules, and Makefile fragments for including into elf/Makefile.
++
++This allows automation of what otherwise would be very laborious manual
++construction of complex dependency cases; however, it must be noted that this
++is only a tool to speed up testcase construction, and thus the generation
++features are largely mechanical in nature.  Inconsistencies or errors may occur
++if the input description is itself erroneous or has unforeseen interactions.
++
++The format of the input test description files is:
++
++  # Each test description has a name, lines of description,
++  # and an expected output specification.  Comments use '#'.
++  testname1: <test-description-line>
++  output: <expected-output-string>
++
++  # Tests can be marked to be XFAIL by using 'xfail_output' instead
++  testname2: <test-description-line>
++  xfail_output: <expected-output-string>
++
++  # A default set of GLIBC_TUNABLES tunables can be specified, for which
++  # all following tests will run multiple times, once for each of the
++  # GLIBC_TUNABLES=... strings set by the 'tunable_option' command.
++  tunable_option: <glibc-tunable-string1>
++  tunable_option: <glibc-tunable-string2>
++
++  # Test descriptions can use multiple lines, which will all be merged
++  # together, so order is not important.
++  testname3: <test-description-line>
++  <test-description-line>
++  <test-description-line>
++  ...
++  output: <expected-output-string>
++
++  # 'testname3' will be run and compared two times, for both
++  # GLIBC_TUNABLES=<glibc-tunable-string1> and
++  # GLIBC_TUNABLES=<glibc-tunable-string2>.  This can be cleared and reset by the
++  # 'clear_tunables' command:
++  clear_tunables
++
++  # Multiple expected outputs can also be specified, each with an associated
++  # tunable option in (); in that case the test will be run multiple times,
++  # once with each GLIBC_TUNABLES=... option tried.
++  testname4:
++  <test-description-line>
++  ...
++  output(<glibc-tunable-string1>): <expected-output-string-1>
++  output(<glibc-tunable-string2>): <expected-output-string-2>
++  # Individual tunable output cases can be XFAILed, though note that
++  # this will have the effect of XFAILing the entire 'testname4' test
++  # in the final top-level tests.sum summary.
++  xfail_output(<glibc-tunable-string3>): <expected-output-string-3>
++
++  # When multiple outputs (with specific tunable strings) are specified,
++  # these take priority over any active 'tunable_option' settings.
++
++  # When a test is meant to be placed under 'xtests' (not run under
++  # "make check", but only when "make xtests" is used), the testcase name can be
++  # declared using 'xtest(<test-name>)':
++  ...
++  xtest(test-too-big1): <test-description>
++  output: <expected-output-string>
++  ...
++
++  # Do note that under the current elf/Makefile organization, for such an xtest case,
++  # while the test execution is only run under 'make xtests', the associated
++  # DSOs are always built even under 'make check'.
++
++As an example of the description language used, here is a description line string:
++
++  a->b!->[cdef];c=>g=>h;{+c;%c;-c}->a
++
++Each identifier represents a shared object module; currently, sequences of
++letters/digits are allowed, and names are case-sensitive.
++
++All such shared objects have a constructor/destructor generated for them
++that emits its name followed by a '>' for constructors, and '<' followed by
++its name for destructors, e.g. if the name is 'obj1', then "obj1>" and "<obj1"
++are printed by its constructor/destructor respectively.
++
++The -> operator specifies a link-time dependency; these can be chained for
++convenience (e.g. a->b->c->d).
++
++The => operator creates a call-reference, e.g. for a=>b, an fn_a() function
++is created inside module 'a', which calls fn_b() in module 'b'.
++These module functions emit 'name()' output in nested form,
++e.g. a=>b emits 'a(b())'
++
++For single character object names, square brackets [] in the description
++allow specifying multiple objects; e.g. a->[bcd]->e is equivalent to
++ a->b->e;a->c->e;a->d->e
++
++The () parenthesis construct with space separated names is also allowed for
++specifying objects.  For names with integer suffixes, a range can also be used,
++e.g. (foo1 bar2-5) specifies DSOs foo1, bar2, bar3, bar4, bar5.
++
++A {} construct specifies the main test program, and its link dependencies
++are also specified using ->.  Inside {}, a few ;-separated constructs are
++allowed:
++         +a   Loads module a using dlopen(RTLD_LAZY|RTLD_GLOBAL)
++         ^a   Loads module a using dlopen(RTLD_LAZY)
++         %a   Use dlsym() to load and call fn_a()
++         @a   Calls fn_a() directly.
++         -a   Unloads module a using dlclose()
++
++The generated main program outputs '{' '}' with all output from above
++constructs in between.  The other output before/after {} is the ordered
++constructor/destructor output.
++
++If no {} construct is present, a default empty main program is linked
++against all objects which have no dependency linked to it. e.g. for
++'[ab]->c;d->e', the default main program is equivalent to '{}->[abd]'
++
++Sometimes for very complex or large testcases, besides specifying a
++few explicit dependencies from main{}, the above default dependency
++behavior is still useful to have automatically, but it is turned off
++as soon as a single explicit {}->dso_name dependency is specified.
++In this case, add {}->* to explicitly add this generation behavior:
++
++   # Main program links to 'foo', and all other objects which have no
++   # dependency linked to it.
++   {}->foo,{}->*
++
++Note that '*' works not only on main{}, but can be used as the
++dependency target of any object.  Note that it only works as a target,
++not a dependency source.
++
++The '!' operator after object names turns on permutation of its
++dependencies, e.g. while a->[bcd] only generates one set of objects,
++with 'a.so' built with a link line of "b.so c.so d.so", for a!->[bcd]
++permutations of a's dependencies create multiple testcases with
++different link line orders: "b.so c.so d.so", "c.so b.so d.so",
++"b.so d.so c.so", etc.  Note that for a <test-name> specified on
++the script command-line, multiple <test-name_1>, <test-name_2>, etc.
++tests will be generated (e.g. for a!->[bc]!->[de], eight tests with
++different link orders for a, b, and c will be generated)
++
++It is possible to specify the ELF soname field for an object or the
++main program:
++   # DSO 'a' will be linked with the appropriate -Wl,-soname=x setting
++   a->b->c;soname(a)=x
++   # The main program can also have a soname specified
++   soname({})=y
++
++This can be used to test how ld.so behaves when objects and/or the
++main program have such a field set.
++
++
++Strings Output by Generated Testcase Programs
++
++The text output produced by a generated testcase consists of three main
++parts:
++  1. The constructors' output
++  2. Output from the main program
++  3. Destructors' output
++
++As an example, a simple test description "a->b->c" generates a testcase
++that, when run, outputs: "c>b>a>{}<a<b<c"
++
++Each generated DSO constructor prints its name followed by a '>' character,
++and the "c>b>a>" part above is the full constructor output from all DSOs, the
++order indicating that DSO 'c', which does not depend on any other DSO, has
++its constructor run first, followed by 'b' and then 'a'.
++
++Destructor output for each DSO is a '<' character followed by its name,
++reflecting that destructors run in the reverse order of constructors.  In
++the above example, the destructor output part is "<a<b<c".
++
++The middle "{}" part is the main program.  In this simple example, nothing
++was specified for the main program, so by default it is implicitly linked
++to the DSO 'a' (with no other DSOs depending on it) and only prints the
++brackets {} with no actions inside.
++
++For an example with actions inside the main program, consider the
++description: c->g=>h;{+c;%c;-c}->a->h
++
++This produces a testcase that, when executed, outputs:
++             h>a>{+c[g>c>];%c();-c[<c<g];}<a<h
++
++The constructor and destructor parts display the a->h dependency as expected.
++Inside the main program, the "+c" action triggers a dlopen() of DSO 'c',
++causing another chain of constructors "g>c>" to be triggered.  Here it is
++displayed inside [] brackets for each dlopen call.  The same is done for "-c",
++a dlclose() of 'c'.
++
++The "%c" output comes from calling fn_c() inside DSO 'c'; it consists of
++two parts: the '%' character is printed by the caller, here the main
++program, and the 'c' character is printed from inside fn_c().  The '%'
++character indicates that the call is made through a dlsym() of "fn_c".
++A '@' character would mean a direct call (via a symbol reference).  These
++can all be controlled by the main test program constructs documented earlier.
++
++The output strings described here are in exactly the same form as placed
++in a test description file's "output: <expected output>" line.
++"""
++
++import sys
++import re
++import os
++import subprocess
++import argparse
++from collections import OrderedDict
++import itertools
++
++# build_gcc is only used under the --build option, which builds the
++# generated testcase, including its DSOs, with this compiler.  The option
++# exists mainly for testing purposes, especially debugging of this script,
++# and the value can be changed here to another toolchain path if needed.
++build_gcc = "gcc"
++
++def get_parser():
++    parser = argparse.ArgumentParser("")
++    parser.add_argument("description",
++                         help="Description string of DSO dependency test to be "
++                         "generated (see script source for documentation of "
++                         "description language), either specified here as "
++                         "command line argument, or by input file using "
++                         "-f/--description-file option",
++                         nargs="?", default="")
++    parser.add_argument("test_name",
++                        help="Identifier for testcase being generated",
++                        nargs="?", default="")
++    parser.add_argument("--objpfx",
++                        help="Path to place generated files, defaults to "
++                        "current directory if none specified",
++                        nargs="?", default="./")
++    parser.add_argument("-m", "--output-makefile",
++                        help="File to write Makefile fragment to, defaults to "
++                        "stdout when option not present",
++                        nargs="?", default="")
++    parser.add_argument("-f", "--description-file",
++                        help="Input file containing testcase descriptions",
++                        nargs="?", default="")
++    parser.add_argument("--build", help="After C testcase generated, build it "
++                        "using gcc (for manual testing purposes)",
++                        action="store_true")
++    parser.add_argument("--debug-output",
++                        help="Prints some internal data "
++                        "structures; used for debugging of this script",
++                        action="store_true")
++    return parser
++
++# Main script starts here.
++cmdlineargs = get_parser().parse_args()
++test_name = cmdlineargs.test_name
++description = cmdlineargs.description
++objpfx = cmdlineargs.objpfx
++description_file = cmdlineargs.description_file
++output_makefile = cmdlineargs.output_makefile
++makefile = ""
++default_tunable_options = []
++
++current_input_lineno = 0
++def error(msg):
++    global current_input_lineno
++    print("Error: %s%s" % ((("Line %d, " % current_input_lineno)
++                            if current_input_lineno != 0 else ""),
++                           msg))
++    exit(1)
++
++if (test_name or description) and description_file:
++    error("both command-line testcase and input file specified")
++if test_name and not description:
++    error("command-line testcase name without description string")
++
++# Main class type describing a testcase.
++class TestDescr:
++    def __init__(self):
++        self.objs = []              # list of all DSO objects
++        self.deps = OrderedDict()   # map of DSO object -> list of dependencies
++
++        # map of DSO object -> list of call refs
++        self.callrefs = OrderedDict()
++
++        # map of DSO object -> list of permutations of dependencies
++        self.dep_permutations = OrderedDict()
++
++        # map of DSO object -> SONAME of object (if one is specified)
++        self.soname_map = OrderedDict()
++
++        # list of main program operations
++        self.main_program = []
++        # set if default dependencies added to main
++        self.main_program_default_deps = True
++
++        self.test_name = ""                   # name of testcase
++        self.expected_outputs = OrderedDict() # expected outputs of testcase
++        self.xfail = False                    # set if this is a XFAIL testcase
++        self.xtest = False                    # set if this is put under 'xtests'
++
++    # Add 'object -> [object, object, ...]' relations to CURR_MAP
++    def __add_deps_internal(self, src_objs, dst_objs, curr_map):
++        for src in src_objs:
++            for dst in dst_objs:
++                if not src in curr_map:
++                    curr_map[src] = []
++                if not dst in curr_map[src]:
++                    curr_map[src].append(dst)
++    def add_deps(self, src_objs, dst_objs):
++        self.__add_deps_internal(src_objs, dst_objs, self.deps)
++    def add_callrefs(self, src_objs, dst_objs):
++        self.__add_deps_internal(src_objs, dst_objs, self.callrefs)
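++
++# A minimal usage sketch (illustration only; never called by the script):
++# add_deps() accumulates edges without duplicates, mirroring the 'a->[bc]'
++# form documented above.
++def _add_deps_example_sketch():
++    t = TestDescr()
++    t.add_deps(['a'], ['b', 'c'])
++    t.add_deps(['a'], ['b'])    # duplicate edge is ignored
++    return t.deps               # OrderedDict([('a', ['b', 'c'])])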
++
++# Process commands inside the {} construct.
++# Note that throughout this script, the main program object is represented
++# by the '#' string.
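++# For example, a "{+a;%a;-a}" construct results in
++# main_program == ['+a', '%a', '-a']; object 'a' is registered, and the
++# '%a' entry records a '#' => 'a' call reference.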
++def process_main_program(test_descr, mainprog_str):
++    if mainprog_str:
++        test_descr.main_program = mainprog_str.split(';')
++    for s in test_descr.main_program:
++        m = re.match(r"^([+\-%^@])([0-9a-zA-Z]+)$", s)
++        if not m:
++            error("'%s' is not recognized main program operation" % (s))
++        opr = m.group(1)
++        obj = m.group(2)
++        if not obj in test_descr.objs:
++            test_descr.objs.append(obj)
++        if opr == '%' or opr == '@':
++            test_descr.add_callrefs(['#'], [obj])
++    # A main program was specified, so turn this off.
++    test_descr.main_program_default_deps = False
++
++# For (a1 a2 b1-12) object set descriptions, expand into an object list
++def expand_object_set_string(descr_str):
++    obj_list = []
++    descr_list = descr_str.split()
++    for descr in descr_list:
++        m = re.match(r"^([a-zA-Z][0-9a-zA-Z]*)(-[0-9]+)?$", descr)
++        if not m:
++            error("'%s' is not a valid object set description" % (descr))
++        obj = m.group(1)
++        idx_end = m.group(2)
++        if not idx_end:
++            if not obj in obj_list:
++                obj_list.append(obj)
++        else:
++            idx_end = int(idx_end[1:])
++            m = re.match(r"^([0-9a-zA-Z][a-zA-Z]*)([0-9]+)$", obj)
++            if not m:
++                error("object description '%s' is malformed" % (obj))
++            obj_name = m.group(1)
++            idx_start = int(m.group (2))
++            if idx_start > idx_end:
++                error("index range %s-%s invalid" % (idx_start, idx_end))
++            for i in range(idx_start, idx_end + 1):
++                o = obj_name + str(i)
++                if not o in obj_list:
++                    obj_list.append(o)
++    return obj_list
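++
++# Illustration only (never called during test generation): the range form
++# from the documentation above expands as follows.
++def _expand_range_example_sketch():
++    # Returns ['foo1', 'bar2', 'bar3', 'bar4', 'bar5'].
++    return expand_object_set_string("foo1 bar2-5")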
++
++# Lexer for tokens
++tokenspec = [ ("SONAME",   r"soname\(([0-9a-zA-Z{}]+)\)=([0-9a-zA-Z]+)"),
++              ("OBJ",      r"([0-9a-zA-Z]+)"),
++              ("DEP",      r"->"),
++              ("CALLREF",  r"=>"),
++              ("OBJSET",   r"\[([0-9a-zA-Z]+)\]"),
++              ("OBJSET2",  r"\(([0-9a-zA-Z \-]+)\)"),
++              ("OBJSET3",  r"\*"),
++              ("PROG",     r"{([0-9a-zA-Z;+^\-%@]*)}"),
++              ("PERMUTE",  r"!"),
++              ("SEMICOL",  r";"),
++              ("ERROR",    r".") ]
++tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tokenspec)
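++# Illustration (using the spec above): scanning "a->[bc];{}" with
++# re.finditer(tok_re, ...) yields the (m.lastgroup, m.group()) pairs
++#   ('OBJ', 'a'), ('DEP', '->'), ('OBJSET', '[bc]'), ('SEMICOL', ';'),
++#   ('PROG', '{}')
++# which is how parse_description_string() below consumes tokens.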
++
++# Main line parser of description language
++def parse_description_string(t, descr_str):
++    # State used when parsing dependencies
++    curr_objs = []
++    in_dep = False
++    in_callref = False
++    def clear_dep_state():
++        nonlocal in_dep, in_callref
++        in_dep = in_callref = False
++
++    for m in re.finditer(tok_re, descr_str):
++        kind = m.lastgroup
++        value = m.group()
++        if kind == "SONAME":
++            s = re.match(r"soname\(([0-9a-zA-Z{}]+)\)=([0-9a-zA-Z]+)", value)
++            obj = s.group(1)
++            val = s.group(2)
++            if obj == "{}":
++                if '#' in t.soname_map:
++                    error("soname of main program already set")
++                # Adjust to internal name
++                obj = '#'
++            else:
++                if re.match(r"[{}]", obj):
++                    error("invalid object name '%s'" % (obj))
++                if not obj in t.objs:
++                    error("'%s' is not name of already defined object" % (obj))
++                if obj in t.soname_map:
++                    error("'%s' already has soname of '%s' set"
++                          % (obj, t.soname_map[obj]))
++            t.soname_map[obj] = val
++
++        elif kind == "OBJ":
++            if in_dep:
++                t.add_deps(curr_objs, [value])
++            elif in_callref:
++                t.add_callrefs(curr_objs, [value])
++            clear_dep_state()
++            curr_objs = [value]
++            if not value in t.objs:
++                t.objs.append(value)
++
++        elif kind == "OBJSET":
++            objset = value[1:len(value)-1]
++            if in_dep:
++                t.add_deps(curr_objs, list (objset))
++            elif in_callref:
++                t.add_callrefs(curr_objs, list (objset))
++            clear_dep_state()
++            curr_objs = list(objset)
++            for o in list(objset):
++                if not o in t.objs:
++                    t.objs.append(o)
++
++        elif kind == "OBJSET2":
++            descr_str = value[1:len(value)-1]
++            descr_str.strip()
++            objs = expand_object_set_string(descr_str)
++            if not objs:
++                error("empty object set '%s'" % (value))
++            if in_dep:
++                t.add_deps(curr_objs, objs)
++            elif in_callref:
++                t.add_callrefs(curr_objs, objs)
++            clear_dep_state()
++            curr_objs = objs
++            for o in objs:
++                if not o in t.objs:
++                    t.objs.append(o)
++
++        elif kind == "OBJSET3":
++            if in_dep:
++                t.add_deps(curr_objs, ['*'])
++            elif in_callref:
++                t.add_callrefs(curr_objs, ['*'])
++            else:
++                error("non-dependence target set '*' can only be used "
++                      "as target of ->/=> operations")
++            clear_dep_state()
++            curr_objs = ['*']
++
++        elif kind == "PERMUTE":
++            if in_dep or in_callref:
++                error("syntax error, permute operation invalid here")
++            if not curr_objs:
++                error("syntax error, no objects to permute here")
++
++            for obj in curr_objs:
++                if not obj in t.dep_permutations:
++                    # Signal this object has permuted dependencies
++                    t.dep_permutations[obj] = []
++
++        elif kind == "PROG":
++            if t.main_program:
++                error("cannot have more than one main program")
++            if in_dep:
++                error("objects cannot have dependency on main program")
++            if in_callref:
++                # TODO: A DSO can resolve to a symbol in the main binary,
++                # which we syntactically allow here, but haven't yet
++                # implemented.
++                t.add_callrefs(curr_objs, ["#"])
++            process_main_program(t, value[1:len(value)-1])
++            clear_dep_state()
++            curr_objs = ["#"]
++
++        elif kind == "DEP":
++            if in_dep or in_callref:
++                error("syntax error, multiple contiguous ->,=> operations")
++            if '*' in curr_objs:
++                error("non-dependence target set '*' can only be used "
++                      "as target of ->/=> operations")
++            in_dep = True
++
++        elif kind == "CALLREF":
++            if in_dep or in_callref:
++                error("syntax error, multiple contiguous ->,=> operations")
++            if '*' in curr_objs:
++                error("non-dependence target set '*' can only be used "
++                      "as target of ->/=> operations")
++            in_callref = True
++
++        elif kind == "SEMICOL":
++            curr_objs = []
++            clear_dep_state()
++
++        else:
++            error("unknown token '%s'" % (value))
++    return t
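++
++# A minimal usage sketch (illustration only; this helper is never called):
++# parsing the documented example "a->b->c" fills in the object list and the
++# dependency map.
++def _parse_example_sketch():
++    t = parse_description_string(TestDescr(), "a->b->c")
++    # t.objs == ['a', 'b', 'c'] and t.deps == {'a': ['b'], 'b': ['c']};
++    # process_testcase() later links the default main program against 'a'.
++    return t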
++
++# Main routine to process each testcase description
++def process_testcase(t):
++    global objpfx
++    assert t.test_name
++
++    base_test_name = t.test_name
++    test_subdir = base_test_name + "-dir"
++    testpfx = objpfx + test_subdir + "/"
++
++    if not os.path.exists(testpfx):
++        os.mkdir(testpfx)
++
++    def find_objs_not_depended_on(t):
++        objs_not_depended_on = []
++        for obj in t.objs:
++            skip = False
++            for r in t.deps.items():
++                if obj in r[1]:
++                    skip = True
++                    break
++            if not skip:
++                objs_not_depended_on.append(obj)
++        return objs_not_depended_on
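++    # For instance, with the documented example '[ab]->c;d->e' the helper
++    # above returns ['a', 'b', 'd'], which is what the default main program
++    # gets linked against when no {} construct was given.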
++
++    non_dep_tgt_objs = find_objs_not_depended_on(t)
++    for obj in t.objs:
++        if obj in t.deps:
++            deps = t.deps[obj]
++            if '*' in deps:
++                t.deps[obj].remove('*')
++                t.add_deps([obj], non_dep_tgt_objs)
++        if obj in t.callrefs:
++            deps = t.callrefs[obj]
++            if '*' in deps:
++                t.callrefs[obj].remove('*')
++                t.add_callrefs([obj], non_dep_tgt_objs)
++    if "#" in t.deps:
++        deps = t.deps["#"]
++        if '*' in deps:
++            t.deps["#"].remove('*')
++            t.add_deps(["#"], non_dep_tgt_objs)
++
++    # If no main program was specified in the dependency description, make a
++    # default main program with deps pointing to all DSOs which are not
++    # depended on by another DSO.
++    if t.main_program_default_deps:
++        main_deps = non_dep_tgt_objs
++        if not main_deps:
++            error("no objects for default main program to point "
++                  "dependency to (all objects strongly connected?)")
++        t.add_deps(["#"], main_deps)
++
++    # Some debug output
++    if cmdlineargs.debug_output:
++        print("Testcase: %s" % (t.test_name))
++        print("All objects: %s" % (t.objs))
++        print("--- Static link dependencies ---")
++        for r in t.deps.items():
++            print("%s -> %s" % (r[0], r[1]))
++        print("--- Objects whose dependencies are to be permuted ---")
++        for r in t.dep_permutations.items():
++            print("%s" % (r[0]))
++        print("--- Call reference dependencies ---")
++        for r in t.callrefs.items():
++            print("%s => %s" % (r[0], r[1]))
++        print("--- main program ---")
++        print(t.main_program)
++
++    # Main testcase generation routine: generates the Makefile fragment and
++    # the testcase sources, and builds the testcase if --build is specified.
++    def generate_testcase(test_descr, test_suffix):
++
++        test_name = test_descr.test_name + test_suffix
++
++        # Print out needed Makefile fragments for use in glibc/elf/Makefile.
++        module_names = ""
++        for o in test_descr.objs:
++            module_names += " " + test_subdir + "/" + test_name + "-" + o
++        makefile.write("modules-names +=%s\n" % (module_names))
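++        # For example, for a test named 'tst-x' (hypothetical) with the
++        # description 'a->b', the line written above is
++        #   modules-names += tst-x-dir/tst-x-a tst-x-dir/tst-x-b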
++
++        # Depth-first traversal, executing FN(OBJ) in post-order
++        def dfs(t, fn):
++            def dfs_rec(obj, fn, obj_visited):
++                if obj in obj_visited:
++                    return
++                obj_visited[obj] = True
++                if obj in t.deps:
++                    for dep in t.deps[obj]:
++                        dfs_rec(dep, fn, obj_visited)
++                fn(obj)
++
++            obj_visited = {}
++            for obj in t.objs:
++                dfs_rec(obj, fn, obj_visited)
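++        # Post-order means an object's callback runs only after the
++        # callbacks of everything it links against; e.g. for deps
++        # {'a': ['b'], 'b': ['c']} the callback order is c, b, a.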
++
++        # Generate link dependencies for all DSOs, done in a DFS fashion.
++        # Usually this doesn't need to be this complex; just listing the
++        # direct dependencies would be enough.  However, to support creating
++        # circular dependency situations, traversing by DFS and tracking
++        # processing status is the natural way to do it.
++        obj_processed = {}
++        fake_created = {}
++        def gen_link_deps(obj):
++            if obj in test_descr.deps:
++                dso = test_subdir + "/" + test_name + "-" + obj + ".so"
++                dependencies = ""
++                for dep in test_descr.deps[obj]:
++                    if dep in obj_processed:
++                        depstr = (" $(objpfx)" + test_subdir + "/"
++                                  + test_name + "-" + dep + ".so")
++                    else:
++                        # A circular dependency is satisfied by making a
++                        # fake DSO tagged with the correct SONAME
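++                        # (e.g. for 'a->b;b->a', whichever DSO is emitted
++                        # first links against the other's .FAKE.so, whose
++                        # SONAME is set below to the real .so path)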
++                        depstr = (" $(objpfx)" + test_subdir + "/"
++                                  + test_name + "-" + dep + ".FAKE.so")
++                        # Create empty C file and Makefile fragments for fake
++                        # object.  This only needs to be done at most once for
++                        # an object name.
++                        if not dep in fake_created:
++                            f = open(testpfx + test_name + "-" + dep
++                                     + ".FAKE.c", "w")
++                            f.write(" \n")
++                            f.close()
++                            # Generate rule to create fake object
++                            makefile.write \
++                                ("LDFLAGS-%s = -Wl,--no-as-needed "
++                                 "-Wl,-soname=%s\n"
++                                 % (test_name + "-" + dep + ".FAKE.so",
++                                    ("$(objpfx)" + test_subdir + "/"
++                                     + test_name + "-" + dep + ".so")))
++                            makefile.write \
++                                ("modules-names += %s\n"
++                                 % (test_subdir + "/"
++                                    + test_name + "-" + dep + ".FAKE"))
++                            fake_created[dep] = True
++                    dependencies += depstr
++                makefile.write("$(objpfx)%s:%s\n" % (dso, dependencies))
++            # Mark obj as processed
++            obj_processed[obj] = True
++
++        dfs(test_descr, gen_link_deps)
++
++        # Print LDFLAGS-* and *-no-z-defs
++        for o in test_descr.objs:
++            dso = test_name + "-" + o + ".so"
++            ldflags = "-Wl,--no-as-needed"
++            if o in test_descr.soname_map:
++                soname = ("$(objpfx)" + test_subdir + "/"
++                          + test_name + "-"
++                          + test_descr.soname_map[o] + ".so")
++                ldflags += (" -Wl,-soname=" + soname)
++            makefile.write("LDFLAGS-%s = %s\n" % (dso, ldflags))
++            if o in test_descr.callrefs:
++                makefile.write("%s-no-z-defs = yes\n" % (dso))
++
++        # Print dependencies for main test program.
++        depstr = ""
++        if '#' in test_descr.deps:
++            for o in test_descr.deps['#']:
++                depstr += (" $(objpfx)" + test_subdir + "/"
++                           + test_name + "-" + o + ".so")
++        makefile.write("$(objpfx)%s/%s:%s\n" % (test_subdir, test_name, depstr))
++        ldflags = "-Wl,--no-as-needed"
++        if '#' in test_descr.soname_map:
++            soname = ("$(objpfx)" + test_subdir + "/"
++                      + test_name + "-"
++                      + test_descr.soname_map['#'] + ".so")
++            ldflags += (" -Wl,-soname=" + soname)
++        makefile.write("LDFLAGS-%s = %s\n" % (test_name, ldflags))
++
++        not_depended_objs = find_objs_not_depended_on(test_descr)
++        if not_depended_objs:
++            depstr = ""
++            for dep in not_depended_objs:
++                depstr += (" $(objpfx)" + test_subdir + "/"
++                           + test_name + "-" + dep + ".so")
++            makefile.write("$(objpfx)%s.out:%s\n" % (base_test_name, depstr))
++
++        # Add main executable to test-srcs
++        makefile.write("test-srcs += %s/%s\n" % (test_subdir, test_name))
++        # Add dependency on main executable of test
++        makefile.write("$(objpfx)%s.out: $(objpfx)%s/%s\n"
++                        % (base_test_name, test_subdir, test_name))
++
++        for r in test_descr.expected_outputs.items():
++            tunable_options = []
++            specific_tunable = r[0]
++            xfail = r[1][1]
++            if specific_tunable != "":
++                tunable_options = [specific_tunable]
++            else:
++                tunable_options = default_tunable_options
++                if not tunable_options:
++                    tunable_options = [""]
++
++            for tunable in tunable_options:
++                tunable_env = ""
++                tunable_sfx = ""
++                exp_tunable_sfx = ""
++                if tunable:
++                    tunable_env = "GLIBC_TUNABLES=%s " % tunable
++                    tunable_sfx = "-" + tunable.replace("=","_")
++                if specific_tunable:
++                    tunable_sfx = "-" + specific_tunable.replace("=","_")
++                    exp_tunable_sfx = tunable_sfx
++                tunable_descr = ("(%s)" % tunable_env.strip()
++                                 if tunable_env else "")
++                # Write out fragment of shell script for this single test.
++                test_descr.sh.write \
++                    ("%s${test_wrapper_env} ${run_program_env} \\\n"
++                     "${common_objpfx}support/test-run-command \\\n"
++                     "${common_objpfx}elf/ld.so \\\n"
++                     "--library-path ${common_objpfx}elf/%s:"
++                     "${common_objpfx}elf:${common_objpfx}.:"
++                     "${common_objpfx}dlfcn \\\n"
++                     "${common_objpfx}elf/%s/%s > \\\n"
++                     "  ${common_objpfx}elf/%s/%s%s.output\n"
++                     % (tunable_env ,test_subdir,
++                        test_subdir, test_name, test_subdir, test_name,
++                        tunable_sfx))
++                # Generate a run of each test and compare with expected output.
++                test_descr.sh.write \
++                    ("if [ $? -ne 0 ]; then\n"
++                     "  echo '%sFAIL: %s%s execution test'\n"
++                     "  something_failed=true\n"
++                     "else\n"
++                     "  diff -wu ${common_objpfx}elf/%s/%s%s.output \\\n"
++                     "           ${common_objpfx}elf/%s/%s%s.exp\n"
++                     "  if [ $? -ne 0 ]; then\n"
++                     "    echo '%sFAIL: %s%s expected output comparison'\n"
++                     "    something_failed=true\n"
++                     "  fi\n"
++                     "fi\n"
++                     % (("X" if xfail else ""), test_name, tunable_descr,
++                        test_subdir, test_name, tunable_sfx,
++                        test_subdir, base_test_name, exp_tunable_sfx,
++                        ("X" if xfail else ""), test_name, tunable_descr))
++
++        # Generate C files according to dependency and calling relations from
++        # description string.
++        for obj in test_descr.objs:
++            src_name = test_name + "-" + obj + ".c"
++            f = open(testpfx + src_name, "w")
++            if obj in test_descr.callrefs:
++                called_objs = test_descr.callrefs[obj]
++                for callee in called_objs:
++                    f.write("extern void fn_%s (void);\n" % (callee))
++            if len(obj) == 1:
++                f.write("extern int putchar(int);\n")
++                f.write("static void __attribute__((constructor)) " +
++                         "init(void){putchar('%s');putchar('>');}\n" % (obj))
++                f.write("static void __attribute__((destructor)) " +
++                         "fini(void){putchar('<');putchar('%s');}\n" % (obj))
++            else:
++                f.write('extern int printf(const char *, ...);\n')
++                f.write('static void __attribute__((constructor)) ' +
++                         'init(void){printf("%s>");}\n' % (obj))
++                f.write('static void __attribute__((destructor)) ' +
++                         'fini(void){printf("<%s");}\n' % (obj))
++            if obj in test_descr.callrefs:
++                called_objs = test_descr.callrefs[obj]
++                if len(obj) != 1:
++                    f.write("extern int putchar(int);\n")
++                f.write("void fn_%s (void) {\n" % (obj))
++                if len(obj) == 1:
++                    f.write("  putchar ('%s');\n" % (obj));
++                    f.write("  putchar ('(');\n");
++                else:
++                    f.write('  printf ("%s(");\n' % (obj));
++                for callee in called_objs:
++                    f.write("  fn_%s ();\n" % (callee))
++                f.write("  putchar (')');\n");
++                f.write("}\n")
++            else:
++                for callref in test_descr.callrefs.items():
++                    if obj in callref[1]:
++                        if len(obj) == 1:
++                            # We need to declare printf here in this case.
++                            f.write('extern int printf(const char *, ...);\n')
++                        f.write("void fn_%s (void) {\n" % (obj))
++                        f.write('  printf ("%s()");\n' % (obj))
++                        f.write("}\n")
++                        break
++            f.close()
++
++        # Open C file for writing main program
++        f = open(testpfx + test_name + ".c", "w")
++
++        # If there are some operations in main(), it means we need -ldl.
++        f.write("#include <stdio.h>\n")
++        f.write("#include <stdlib.h>\n")
++        f.write("#include <dlfcn.h>\n")
++        for s in test_descr.main_program:
++            if s[0] == '@':
++                f.write("extern void fn_%s (void);\n" % (s[1:]));
++        f.write("int main (void) {\n")
++        f.write("  putchar('{');\n")
++
++        # Helper routine for generating sanity checking code.
++        def put_fail_check(fail_cond, action_desc):
++            f.write('  if (%s) { printf ("\\n%s failed: %%s\\n", '
++                     'dlerror()); exit (1);}\n' % (fail_cond, action_desc))
++        i = 0
++        while i < len(test_descr.main_program):
++            s = test_descr.main_program[i]
++            obj = s[1:]
++            dso = test_name + "-" + obj
++            if s[0] == '+' or s[0] == '^':
++                if s[0] == '+':
++                    dlopen_flags = "RTLD_LAZY|RTLD_GLOBAL"
++                    f.write("  putchar('+');\n");
++                else:
++                    dlopen_flags = "RTLD_LAZY"
++                    f.write("  putchar(':');\n");
++                if len(obj) == 1:
++                    f.write("  putchar('%s');\n" % (obj));
++                else:
++                    f.write('  printf("%s");\n' % (obj));
++                f.write("  putchar('[');\n");
++                f.write('  void *%s = dlopen ("%s.so", %s);\n'
++                         % (obj, dso, dlopen_flags))
++                put_fail_check("!%s" % (obj),
++                                "%s.so dlopen" % (dso))
++                f.write("  putchar(']');\n");
++            elif s[0] == '-':
++                f.write("  putchar('-');\n");
++                if len(obj) == 1:
++                    f.write("  putchar('%s');\n" % (obj));
++                else:
++                    f.write('  printf("%s");\n' % (obj));
++                f.write("  putchar('[');\n");
++                put_fail_check("dlclose (%s) != 0" % (obj),
++                                "%s.so dlclose" % (dso))
++                f.write("  putchar(']');\n");
++            elif s[0] == '%':
++                f.write("  putchar('%');\n");
++                f.write('  void (*fn_%s)(void) = dlsym (%s, "fn_%s");\n'
++                         % (obj, obj, obj))
++                put_fail_check("!fn_%s" % (obj),
++                                "dlsym(fn_%s) from %s.so" % (obj, dso))
++                f.write("  fn_%s ();\n" % (obj))
++            elif s[0] == '@':
++                f.write("  putchar('@');\n");
++                f.write("  fn_%s ();\n" % (obj))
++            f.write("  putchar(';');\n");
++            i += 1
++        f.write("  putchar('}');\n")
++        f.write("  return 0;\n")
++        f.write("}\n")
++        f.close()
++
++        # --build option processing: build generated sources using 'build_gcc'
++        if cmdlineargs.build:
++            # Helper routine to run a shell command, for running GCC below
++            def run_cmd(args):
++                cmd = str.join(' ', args)
++                if cmdlineargs.debug_output:
++                    print(cmd)
++                p = subprocess.Popen(args)
++                p.wait()
++                if p.returncode != 0:
++                    error("error running command: %s" % (cmd))
++
++            # Compile individual .os files
++            for obj in test_descr.objs:
++                src_name = test_name + "-" + obj + ".c"
++                obj_name = test_name + "-" + obj + ".os"
++                run_cmd([build_gcc, "-c", "-fPIC", testpfx + src_name,
++                          "-o", testpfx + obj_name])
++
++            obj_processed = {}
++            fake_created = {}
++            # Function to create <test_name>-<obj>.so
++            def build_dso(obj):
++                obj_name = test_name + "-" + obj + ".os"
++                dso_name = test_name + "-" + obj + ".so"
++                deps = []
++                if obj in test_descr.deps:
++                    for dep in test_descr.deps[obj]:
++                        if dep in obj_processed:
++                            deps.append(dep)
++                        else:
++                            deps.append(dep + ".FAKE")
++                            if not dep in fake_created:
++                                base_name = testpfx + test_name + "-" + dep
++                                cmd = [build_gcc, "-Wl,--no-as-needed",
++                                       ("-Wl,-soname=" + base_name + ".so"),
++                                       "-shared", base_name + ".FAKE.c",
++                                       "-o", base_name + ".FAKE.so"]
++                                run_cmd(cmd)
++                                fake_created[dep] = True
++                dso_deps = map(lambda d: testpfx + test_name + "-" + d + ".so",
++                               deps)
++                cmd = [build_gcc, "-shared", "-o", testpfx + dso_name,
++                       testpfx + obj_name, "-Wl,--no-as-needed"]
++                if obj in test_descr.soname_map:
++                    soname = ("-Wl,-soname=" + testpfx + test_name + "-"
++                              + test_descr.soname_map[obj] + ".so")
++                    cmd += [soname]
++                cmd += list(dso_deps)
++                run_cmd(cmd)
++                obj_processed[obj] = True
++
++            # Build all DSOs; this needs to be done in topological dependency
++            # order, or linking will fail.
++            dfs(test_descr, build_dso)
++
++            # Build main program
++            deps = []
++            if '#' in test_descr.deps:
++                deps = test_descr.deps['#']
++            main_deps = map(lambda d: testpfx + test_name + "-" + d + ".so",
++                            deps)
++            cmd = [build_gcc, "-Wl,--no-as-needed", "-o", testpfx + test_name,
++                   testpfx + test_name + ".c", "-L%s" % (os.getcwd()),
++                   "-Wl,-rpath-link=%s" % (os.getcwd())]
++            if '#' in test_descr.soname_map:
++                soname = ("-Wl,-soname=" + testpfx + test_name + "-"
++                          + test_descr.soname_map['#'] + ".so")
++                cmd += [soname]
++            cmd += list(main_deps)
++            run_cmd(cmd)
++
++    # Check if we need to enumerate permutations of dependencies
++    need_permutation_processing = False
++    if t.dep_permutations:
++        # Adjust dep_permutations into map of object -> dependency permutations
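++        # (e.g. 'a!->[bcd]' stores the six orderings produced by
++        # itertools.permutations(['b', 'c', 'd']) for 'a', and
++        # enum_permutations() below generates one sub-testcase per ordering)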
++        for r in t.dep_permutations.items():
++            obj = r[0]
++            if obj in t.deps and len(t.deps[obj]) > 1:
++                deps = t.deps[obj]
++                t.dep_permutations[obj] = list(itertools.permutations (deps))
++                need_permutation_processing = True
++
++    def enum_permutations(t, perm_list):
++        test_subindex = 1
++        curr_perms = []
++        def enum_permutations_rec(t, perm_list):
++            nonlocal test_subindex, curr_perms
++            if len(perm_list) >= 1:
++                curr = perm_list[0]
++                obj = curr[0]
++                perms = curr[1]
++                if not perms:
++                    # This may be an empty list if no multiple dependencies to
++                    # permute were found; skip to the next entry in that case.
++                    enum_permutations_rec(t, perm_list[1:])
++                else:
++                    for deps in perms:
++                        t.deps[obj] = deps
++                        permstr = "" if obj == "#" else obj + "_"
++                        permstr += str.join('', deps)
++                        curr_perms.append(permstr)
++                        enum_permutations_rec(t, perm_list[1:])
++                        curr_perms = curr_perms[0:len(curr_perms)-1]
++            else:
++                # t.deps is now instantiated with one dependency order
++                # permutation (across all objects that have multiple
++                # permutations); now process a testcase.
++                generate_testcase(t, ("_" + str (test_subindex)
++                                       + "-" + str.join('-', curr_perms)))
++                test_subindex += 1
++        enum_permutations_rec(t, perm_list)
++
++    # Create *.exp files with expected outputs
++    for r in t.expected_outputs.items():
++        sfx = ""
++        if r[0] != "":
++            sfx = "-" + r[0].replace("=","_")
++        f = open(testpfx + t.test_name + sfx + ".exp", "w")
++        (output, xfail) = r[1]
++        f.write('%s' % output)
++        f.close()
++
++    # Create header part of top-level testcase shell script, to wrap execution
++    # and output comparison together.
++    t.sh = open(testpfx + t.test_name + ".sh", "w")
++    t.sh.write("#!/bin/sh\n")
++    t.sh.write("# Test driver for %s, generated by "
++                "dso-ordering-test.py\n" % (t.test_name))
++    t.sh.write("common_objpfx=$1\n")
++    t.sh.write("test_wrapper_env=$2\n")
++    t.sh.write("run_program_env=$3\n")
++    t.sh.write("something_failed=false\n")
++
++    # Starting part of Makefile fragment
++    makefile.write("ifeq (yes,$(build-shared))\n")
++
++    if need_permutation_processing:
++        enum_permutations(t, list (t.dep_permutations.items()))
++    else:
++        # We have no permutations to enumerate, just process testcase normally
++        generate_testcase(t, "")
++
++    # If testcase is XFAIL, indicate so
++    if t.xfail:
++        makefile.write("test-xfail-%s = yes\n" % t.test_name)
++
++    # Output end part of Makefile fragment
++    expected_output_files = ""
++    for r in t.expected_outputs.items():
++        sfx = ""
++        if r[0] != "":
++            sfx = "-" + r[0].replace("=","_")
++        expected_output_files += " $(objpfx)%s/%s%s.exp" % (test_subdir,
++                                                            t.test_name, sfx)
++    makefile.write \
++    ("$(objpfx)%s.out: $(objpfx)%s/%s.sh%s "
++     "$(common-objpfx)support/test-run-command\n"
++     % (t.test_name, test_subdir, t.test_name,
++        expected_output_files))
++    makefile.write("\t$(SHELL) $< $(common-objpfx) '$(test-wrapper-env)' "
++                    "'$(run-program-env)' > $@; $(evaluate-test)\n")
++    makefile.write("ifeq ($(run-built-tests),yes)\n")
++    if t.xtest:
++        makefile.write("xtests-special += $(objpfx)%s.out\n" % (t.test_name))
++    else:
++        makefile.write("tests-special += $(objpfx)%s.out\n" % (t.test_name))
++    makefile.write("endif\n")
++    makefile.write("endif\n")
++
++    # Write the ending part of the generated shell script.
++    t.sh.write("if $something_failed; then\n"
++                "  exit 1\n"
++                "else\n"
++                "  echo '%sPASS: all tests for %s succeeded'\n"
++                "  exit 0\n"
++                "fi\n" % (("X" if t.xfail else ""),
++                          t.test_name))
++    t.sh.close()
++
++# Description file parsing
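++# In outline, a description file consists of lines of the form
++#   <test-name>: <description-string>
++#   output: <expected-output>
++# plus optional 'tunable_option: <glibc-tunable>' and 'clear_tunables'
++# lines and the 'output(<tunable>):' / 'xfail_output...:' variants, all of
++# which are handled by the parser below.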
++def parse_description_file(filename):
++    global default_tunable_options
++    global current_input_lineno
++    f = open(filename)
++    if not f:
++        error("cannot open description file %s" % (filename))
++    descrfile_lines = f.readlines()
++    t = None
++    for line in descrfile_lines:
++        p = re.compile(r"#.*$")
++        line = p.sub("", line) # Filter out comments
++        line = line.strip() # Remove excess whitespace
++        current_input_lineno += 1
++
++        m = re.match(r"^tunable_option:\s*(.*)$", line)
++        if m:
++            if m.group(1) == "":
++                error("tunable option cannot be empty")
++            default_tunable_options.append(m.group (1))
++            continue
++
++        m = re.match(r"^clear_tunables$", line)
++        if m:
++            default_tunable_options = []
++            continue
++
++        m = re.match(r"^([^:]+):\s*(.*)$", line)
++        if m:
++            lhs = m.group(1)
++            o = re.match(r"^output(.*)$", lhs)
++            xfail = False
++            if not o:
++                o = re.match(r"^xfail_output(.*)$", lhs)
++                if o:
++                    xfail = True;
++            if o:
++                if not t:
++                    error("output specification without testcase description")
++                tsstr = ""
++                if o.group(1):
++                    ts = re.match(r"^\(([a-zA-Z0-9_.=]*)\)$", o.group (1))
++                    if not ts:
++                        error("tunable option malformed '%s'" % o.group(1))
++                    tsstr = ts.group(1)
++                t.expected_outputs[tsstr] = (m.group(2), xfail)
++                # Any tunable option XFAILed means entire testcase
++                # is XFAIL/XPASS
++                t.xfail |= xfail
++            else:
++                if t:
++                    # Starting a new test description; end and process
++                    # the current one.
++                    process_testcase(t)
++                t = TestDescr()
++                x = re.match(r"^xtest\((.*)\)$", lhs)
++                if x:
++                    t.xtest = True
++                    t.test_name = x.group(1)
++                else:
++                    t.test_name = lhs
++                descr_string = m.group(2)
++                parse_description_string(t, descr_string)
++            continue
++        else:
++            if line:
++                if not t:
++                    error("no active testcase description")
++                parse_description_string(t, line)
++    # Process last completed test description
++    if t:
++        process_testcase(t)
++
++# Set up Makefile output to a file or stdout as selected
++if output_makefile:
++    output_makefile_dir = os.path.dirname(output_makefile)
++    if output_makefile_dir:
++        os.makedirs(output_makefile_dir, exist_ok = True)
++    makefile = open(output_makefile, "w")
++else:
++    makefile = open(sys.stdout.fileno (), "w")
++
++# Finally, the top-level invocation of the parsing routines above.
++if description_file:
++    parse_description_file(description_file)
++else:
++    t = TestDescr()
++    t.test_name = test_name
++    parse_description_string(t, description)
++    process_testcase(t)
++
++# Close Makefile fragment output
++makefile.close()
+diff --git a/support/Depend b/support/Depend
+new file mode 100644
+index 0000000000000000..7e7d5dc67c13e669
+--- /dev/null
++++ b/support/Depend
+@@ -0,0 +1 @@
++elf
+diff --git a/support/Makefile b/support/Makefile
+index 7749ac24f1ac3622..636d69c4f8e7e139 100644
+--- a/support/Makefile
++++ b/support/Makefile
+@@ -215,10 +215,16 @@ others-noinstall += shell-container echo-container true-container
+ others += $(LINKS_DSO_PROGRAM)
+ others-noinstall += $(LINKS_DSO_PROGRAM)
+ 
++others += test-run-command
++others-static += test-run-command
++others-noinstall += test-run-command
++LDLIBS-test-run-command = $(libsupport)
++
+ $(objpfx)test-container : $(libsupport)
+ $(objpfx)shell-container : $(libsupport)
+ $(objpfx)echo-container : $(libsupport)
+ $(objpfx)true-container : $(libsupport)
++$(objpfx)test-run-command : $(libsupport) $(common-objpfx)elf/static-stubs.o
+ 
+ tests = \
+   README-testing \
+diff --git a/support/support_test_main.c b/support/support_test_main.c
+index def84d803928176b..49e9d9c5baf776eb 100644
+--- a/support/support_test_main.c
++++ b/support/support_test_main.c
+@@ -228,6 +228,18 @@ run_test_function (int argc, char **argv, const struct test_config *config)
+   while (wait_for_debugger)
+     usleep (1000);
+ 
++  if (config->run_command_mode)
++    {
++      /* In run-command-mode, the child process executes the command line
++	 arguments as a new program.  */
++      char **argv_ = xmalloc (sizeof (char *) * argc);
++      memcpy (argv_, &argv[1], sizeof (char *) * (argc - 1));
++      argv_[argc - 1] = NULL;
++      execv (argv_[0], argv_);
++      printf ("error: should not return here\n");
++      exit (1);
++    }
++
+   if (config->test_function != NULL)
+     return config->test_function ();
+   else if (config->test_function_argv != NULL)
+diff --git a/support/test-driver.c b/support/test-driver.c
+index 9798f16227b9d467..93f25a99c1b9d2cb 100644
+--- a/support/test-driver.c
++++ b/support/test-driver.c
+@@ -116,7 +116,9 @@ main (int argc, char **argv)
+ #if defined (TEST_FUNCTION) && defined (TEST_FUNCTON_ARGV)
+ # error TEST_FUNCTION and TEST_FUNCTION_ARGV cannot be defined at the same time
+ #endif
+-#if defined (TEST_FUNCTION)
++#ifdef RUN_COMMAND_MODE
++  test_config.run_command_mode = 1;
++#elif defined (TEST_FUNCTION)
+   test_config.test_function = TEST_FUNCTION;
+ #elif defined (TEST_FUNCTION_ARGV)
+   test_config.test_function_argv = TEST_FUNCTION_ARGV;
+diff --git a/support/test-driver.h b/support/test-driver.h
+index 549179b254946390..818689ad1ae7fd8c 100644
+--- a/support/test-driver.h
++++ b/support/test-driver.h
+@@ -36,6 +36,7 @@ struct test_config
+   int expected_signal;   /* If non-zero, expect termination by signal.  */
+   char no_mallopt;       /* Boolean flag to disable mallopt.  */
+   char no_setvbuf;       /* Boolean flag to disable setvbuf.  */
++  char run_command_mode; /* Boolean flag to indicate run-command-mode.  */
+   const char *optstring; /* Short command line options.  */
+ };
+ 
+diff --git a/support/test-run-command.c b/support/test-run-command.c
+new file mode 100644
+index 0000000000000000..61560d7bfb1686a8
+--- /dev/null
++++ b/support/test-run-command.c
+@@ -0,0 +1,22 @@
++/* Main program for test-run-command support utility.
++   Copyright (C) 2021 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++/* This is basically a configuration of test-driver.c into a general
++   command-line program runner.  */
++#define RUN_COMMAND_MODE
++#include <test-driver.c>
diff --git a/SOURCES/glibc-rh1159809-10.patch b/SOURCES/glibc-rh1159809-10.patch
new file mode 100644
index 0000000..4713f8a
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-10.patch
@@ -0,0 +1,79 @@
+commit dbb75513f5cf9285c77c9e55777c5c35b653f890
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Sep 6 07:38:10 2022 +0200
+
+    elf: Rename _dl_sort_maps parameter from skip to force_first
+    
+    The new implementation will not be able to skip an arbitrary number
+    of objects.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
+index 99354dc08a010dd3..7a586749adc3fa7d 100644
+--- a/elf/dl-sort-maps.c
++++ b/elf/dl-sort-maps.c
+@@ -27,12 +27,12 @@
+    If FOR_FINI is true, this is called for finishing an object.  */
+ static void
+ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
+-			unsigned int skip, bool for_fini)
++			bool force_first, bool for_fini)
+ {
+   /* Allows caller to do the common optimization of skipping the first map,
+      usually the main binary.  */
+-  maps += skip;
+-  nmaps -= skip;
++  maps += force_first;
++  nmaps -= force_first;
+ 
+   /* A list of one element need not be sorted.  */
+   if (nmaps <= 1)
+@@ -182,7 +182,7 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
+ 
+ static void
+ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+-		   unsigned int skip __attribute__ ((unused)), bool for_fini)
++		   bool force_first __attribute__ ((unused)), bool for_fini)
+ {
+   for (int i = nmaps - 1; i >= 0; i--)
+     maps[i]->l_visited = 0;
+@@ -286,7 +286,7 @@ _dl_sort_maps_init (void)
+ 
+ void
+ _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+-	       unsigned int skip, bool for_fini)
++	       bool force_first, bool for_fini)
+ {
+   /* It can be tempting to use a static function pointer to store and call
+      the current selected sorting algorithm routine, but experimentation
+@@ -296,9 +296,9 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+      input cases. A simple if-case with direct function calls appears to
+      be the fastest.  */
+   if (__glibc_likely (GLRO(dl_dso_sort_algo) == dso_sort_algorithm_original))
+-    _dl_sort_maps_original (maps, nmaps, skip, for_fini);
++    _dl_sort_maps_original (maps, nmaps, force_first, for_fini);
+   else
+-    _dl_sort_maps_dfs (maps, nmaps, skip, for_fini);
++    _dl_sort_maps_dfs (maps, nmaps, force_first, for_fini);
+ }
+ 
+ #endif /* HAVE_TUNABLES.  */
+diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
+index 9f09a4a280396659..2c1b4c47c6a6c643 100644
+--- a/sysdeps/generic/ldsodefs.h
++++ b/sysdeps/generic/ldsodefs.h
+@@ -1056,9 +1056,11 @@ extern void _dl_init (struct link_map *main_map, int argc, char **argv,
+    initializer functions have completed.  */
+ extern void _dl_fini (void) attribute_hidden;
+ 
+-/* Sort array MAPS according to dependencies of the contained objects.  */
++/* Sort array MAPS according to dependencies of the contained objects.
++   If FORCE_FIRST, MAPS[0] keeps its place even if the dependencies
++   say otherwise.  */
+ extern void _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+-			   unsigned int skip, bool for_fini) attribute_hidden;
++			   bool force_first, bool for_fini) attribute_hidden;
+ 
+ /* The dynamic linker calls this function before and having changing
+    any shared object mappings.  The `r_state' member of `struct r_debug'
diff --git a/SOURCES/glibc-rh1159809-11.patch b/SOURCES/glibc-rh1159809-11.patch
new file mode 100644
index 0000000..fc4a7e1
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-11.patch
@@ -0,0 +1,90 @@
+commit 1df71d32fe5f5905ffd5d100e5e9ca8ad6210891
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Sep 20 11:00:42 2022 +0200
+
+    elf: Implement force_first handling in _dl_sort_maps_dfs (bug 28937)
+    
+    The implementation in _dl_close_worker requires that the first
+    element of l_initfini is always this very map (“We are always the
+    zeroth entry, and since we don't include ourselves in the
+    dependency analysis start at 1.”).  Rather than fixing that
+    assumption, this commit adds an implementation of the force_first
+    argument to the new dependency sorting algorithm.  This also means
+    that the directly dlopen'ed shared object is always initialized last,
+    which is the least surprising behavior in the presence of cycles.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
+index 7a586749adc3fa7d..6f5c17b47b98fbc7 100644
+--- a/elf/dl-sort-maps.c
++++ b/elf/dl-sort-maps.c
+@@ -182,8 +182,9 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
+ 
+ static void
+ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+-		   bool force_first __attribute__ ((unused)), bool for_fini)
++		   bool force_first, bool for_fini)
+ {
++  struct link_map *first_map = maps[0];
+   for (int i = nmaps - 1; i >= 0; i--)
+     maps[i]->l_visited = 0;
+ 
+@@ -208,14 +209,6 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+      Adjusting the order so that maps[0] is last traversed naturally avoids
+      this problem.
+ 
+-     Further, the old "optimization" of skipping the main object at maps[0]
+-     from the call-site (i.e. _dl_sort_maps(maps+1,nmaps-1)) is in general
+-     no longer valid, since traversing along object dependency-links
+-     may "find" the main object even when it is not included in the initial
+-     order (e.g. a dlopen()'ed shared object can have circular dependencies
+-     linked back to itself). In such a case, traversing N-1 objects will
+-     create a N-object result, and raise problems.
+-
+      To summarize, just passing in the full list, and iterating from back
+      to front makes things much more straightforward.  */
+ 
+@@ -274,6 +267,27 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+     }
+ 
+   memcpy (maps, rpo, sizeof (struct link_map *) * nmaps);
++
++  /* Skipping the first object at maps[0] is not valid in general,
++     since traversing along object dependency-links may "find" that
++     first object even when it is not included in the initial order
++     (e.g., a dlopen'ed shared object can have circular dependencies
++     linked back to itself).  In such a case, traversing N-1 objects
++     will create a N-object result, and raise problems.  Instead,
++     force the object back into first place after sorting.  This naive
++     approach may introduce further dependency ordering violations
++     compared to rotating the cycle until the first map is again in
++     the first position, but as there is a cycle, at least one
++     violation is already present.  */
++  if (force_first && maps[0] != first_map)
++    {
++      int i;
++      for (i = 0; maps[i] != first_map; ++i)
++	;
++      assert (i < nmaps);
++      memmove (&maps[1], maps, i * sizeof (maps[0]));
++      maps[0] = first_map;
++    }
+ }
+ 
+ void
+diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
+index 5f7f18ef270bc12d..4bf9052db16fb352 100644
+--- a/elf/dso-sort-tests-1.def
++++ b/elf/dso-sort-tests-1.def
+@@ -64,3 +64,10 @@ output: b>a>{}<a<b
+ tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
+ output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
+ output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
++
++# Test that even in the presence of dependency loops involving a dlopen'ed
++# object, that object is initialized last (and not unloaded prematurely).
++# Final destructor order is indeterminate due to the cycle.
++tst-bz28937: {+a;+b;-b;+c;%c};a->a1;a->a2;a2->a;b->b1;c->a1;c=>a1
++output(glibc.rtld.dynamic_sort=1): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a<a2<c<a1
++output(glibc.rtld.dynamic_sort=2): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a2<a<c<a1
diff --git a/SOURCES/glibc-rh1159809-12.patch b/SOURCES/glibc-rh1159809-12.patch
new file mode 100644
index 0000000..d0f14d7
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-12.patch
@@ -0,0 +1,35 @@
+Downstream-specific patch to link DSO sorting tests with -ldl
+if needed.  Upstream does not need this because <dlfcn.h> interfaces
+are part of libc.
+
+diff --git a/scripts/dso-ordering-test.py b/scripts/dso-ordering-test.py
+index 43b5ec4d920ad6a3..ae85e0f4a6ae5b3e 100644
+--- a/scripts/dso-ordering-test.py
++++ b/scripts/dso-ordering-test.py
+@@ -657,6 +657,8 @@ def process_testcase(t):
+                                  % (test_name + "-" + dep + ".FAKE.so",
+                                     ("$(objpfx)" + test_subdir + "/"
+                                      + test_name + "-" + dep + ".so")))
++                            makefile.write(
++                                "LDLIBS-%s += -Wl,--as-needed -ldl -Wl,--no-as-needed\n" % dso)
+                             rule = ("$(objpfx)" + test_subdir + "/"
+                                     + test_name + "-" + dep + ".FAKE.os: "
+                                     "$(objpfx)" + test_srcdir
+@@ -685,6 +687,8 @@ def process_testcase(t):
+                           + test_descr.soname_map[o] + ".so")
+                 ldflags += (" -Wl,-soname=" + soname)
+             makefile.write("LDFLAGS-%s = %s\n" % (dso, ldflags))
++            makefile.write(
++                "LDLIBS-%s += -Wl,--as-needed -ldl -Wl,--no-as-needed\n" % dso)
+             if o in test_descr.callrefs:
+                 makefile.write("%s-no-z-defs = yes\n" % (dso))
+ 
+@@ -702,6 +706,8 @@ def process_testcase(t):
+                       + test_descr.soname_map['#'] + ".so")
+             ldflags += (" -Wl,-soname=" + soname)
+         makefile.write("LDFLAGS-%s = %s\n" % (test_name, ldflags))
++        makefile.write(
++            "LDLIBS-%s += -Wl,--as-needed -ldl -Wl,--no-as-needed\n" % test_name)
+         rule = ("$(objpfx)" + test_subdir + "/" + test_name + ".o: "
+                 "$(objpfx)" + test_srcdir + test_name + ".c\n"
+                 "\t$(compile.c) $(OUTPUT_OPTION)\n")
diff --git a/SOURCES/glibc-rh1159809-2.patch b/SOURCES/glibc-rh1159809-2.patch
new file mode 100644
index 0000000..1c415d4
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-2.patch
@@ -0,0 +1,189 @@
+commit b4bbedb1e75737a80bcc3d53d6eef1fbe0b5f4d5
+Author: H.J. Lu <hjl.tools@gmail.com>
+Date:   Sat Nov 6 14:13:27 2021 -0700
+
+    dso-ordering-test.py: Put all sources in one directory [BZ #28550]
+    
+    Put all sources for DSO sorting tests in the dso-sort-tests-src directory
+    and compile test relocatable objects with
+    
+    $(objpfx)tst-dso-ordering1-dir/tst-dso-ordering1-a.os: $(objpfx)dso-sort-tests-src/tst-dso-ordering1-a.c
+            $(compile.c) $(OUTPUT_OPTION)
+    
+    to avoid random $< values from $(before-compile) when compiling test
+    relocatable objects with
+    
+    $(objpfx)%$o: $(objpfx)%.c $(before-compile); $$(compile-command.c)
+    compile-command.c = $(compile.c) $(OUTPUT_OPTION) $(compile-mkdep-flags)
+    compile.c = $(CC) $< -c $(CFLAGS) $(CPPFLAGS)
+    
+    for 3 "make -j 28" parallel builds on a machine with 112 cores at the
+    same time.
+    
+    This partially fixes BZ #28550.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/scripts/dso-ordering-test.py b/scripts/dso-ordering-test.py
+index 944ee740527d60fd..bde0406be9da14fc 100644
+--- a/scripts/dso-ordering-test.py
++++ b/scripts/dso-ordering-test.py
+@@ -526,9 +526,13 @@ def process_testcase(t):
+     base_test_name = t.test_name
+     test_subdir = base_test_name + "-dir"
+     testpfx = objpfx + test_subdir + "/"
++    test_srcdir = "dso-sort-tests-src/"
++    testpfx_src = objpfx + test_srcdir
+ 
+     if not os.path.exists(testpfx):
+         os.mkdir(testpfx)
++    if not os.path.exists(testpfx_src):
++        os.mkdir(testpfx_src)
+ 
+     def find_objs_not_depended_on(t):
+         objs_not_depended_on = []
+@@ -595,6 +599,11 @@ def process_testcase(t):
+         # Print out needed Makefile fragments for use in glibc/elf/Makefile.
+         module_names = ""
+         for o in test_descr.objs:
++            rule = ("$(objpfx)" + test_subdir + "/" + test_name
++                    + "-" + o + ".os: $(objpfx)" + test_srcdir
++                    + test_name + "-" + o + ".c\n"
++                    "\t$(compile.c) $(OUTPUT_OPTION)\n")
++            makefile.write (rule)
+             module_names += " " + test_subdir + "/" + test_name + "-" + o
+         makefile.write("modules-names +=%s\n" % (module_names))
+ 
+@@ -637,7 +646,7 @@ def process_testcase(t):
+                         # object.  This only needs to be done at most once for
+                         # an object name.
+                         if not dep in fake_created:
+-                            f = open(testpfx + test_name + "-" + dep
++                            f = open(testpfx_src + test_name + "-" + dep
+                                      + ".FAKE.c", "w")
+                             f.write(" \n")
+                             f.close()
+@@ -648,6 +657,12 @@ def process_testcase(t):
+                                  % (test_name + "-" + dep + ".FAKE.so",
+                                     ("$(objpfx)" + test_subdir + "/"
+                                      + test_name + "-" + dep + ".so")))
++                            rule = ("$(objpfx)" + test_subdir + "/"
++                                    + test_name + "-" + dep + ".FAKE.os: "
++                                    "$(objpfx)" + test_srcdir
++                                    + test_name + "-" + dep + ".FAKE.c\n"
++                                    "\t$(compile.c) $(OUTPUT_OPTION)\n")
++                            makefile.write (rule)
+                             makefile.write \
+                                 ("modules-names += %s\n"
+                                  % (test_subdir + "/"
+@@ -687,6 +702,10 @@ def process_testcase(t):
+                       + test_descr.soname_map['#'] + ".so")
+             ldflags += (" -Wl,-soname=" + soname)
+         makefile.write("LDFLAGS-%s = %s\n" % (test_name, ldflags))
++        rule = ("$(objpfx)" + test_subdir + "/" + test_name + ".o: "
++                "$(objpfx)" + test_srcdir + test_name + ".c\n"
++                "\t$(compile.c) $(OUTPUT_OPTION)\n")
++        makefile.write (rule)
+ 
+         not_depended_objs = find_objs_not_depended_on(test_descr)
+         if not_depended_objs:
+@@ -745,7 +764,7 @@ def process_testcase(t):
+                      "  something_failed=true\n"
+                      "else\n"
+                      "  diff -wu ${common_objpfx}elf/%s/%s%s.output \\\n"
+-                     "           ${common_objpfx}elf/%s/%s%s.exp\n"
++                     "           ${common_objpfx}elf/%s%s%s.exp\n"
+                      "  if [ $? -ne 0 ]; then\n"
+                      "    echo '%sFAIL: %s%s expected output comparison'\n"
+                      "    something_failed=true\n"
+@@ -753,14 +772,14 @@ def process_testcase(t):
+                      "fi\n"
+                      % (("X" if xfail else ""), test_name, tunable_descr,
+                         test_subdir, test_name, tunable_sfx,
+-                        test_subdir, base_test_name, exp_tunable_sfx,
++                        test_srcdir, base_test_name, exp_tunable_sfx,
+                         ("X" if xfail else ""), test_name, tunable_descr))
+ 
+         # Generate C files according to dependency and calling relations from
+         # description string.
+         for obj in test_descr.objs:
+             src_name = test_name + "-" + obj + ".c"
+-            f = open(testpfx + src_name, "w")
++            f = open(testpfx_src + src_name, "w")
+             if obj in test_descr.callrefs:
+                 called_objs = test_descr.callrefs[obj]
+                 for callee in called_objs:
+@@ -804,7 +823,7 @@ def process_testcase(t):
+             f.close()
+ 
+         # Open C file for writing main program
+-        f = open(testpfx + test_name + ".c", "w")
++        f = open(testpfx_src + test_name + ".c", "w")
+ 
+         # if there are some operations in main(), it means we need -ldl
+         f.write("#include <stdio.h>\n")
+@@ -885,7 +904,7 @@ def process_testcase(t):
+             for obj in test_descr.objs:
+                 src_name = test_name + "-" + obj + ".c"
+                 obj_name = test_name + "-" + obj + ".os"
+-                run_cmd([build_gcc, "-c", "-fPIC", testpfx + src_name,
++                run_cmd([build_gcc, "-c", "-fPIC", testpfx_src + src_name,
+                           "-o", testpfx + obj_name])
+ 
+             obj_processed = {}
+@@ -903,10 +922,12 @@ def process_testcase(t):
+                             deps.append(dep + ".FAKE")
+                             if not dep in fake_created:
+                                 base_name = testpfx + test_name + "-" + dep
++                                src_base_name = (testpfx_src + test_name
++                                                 + "-" + dep)
+                                 cmd = [build_gcc, "-Wl,--no-as-needed",
+                                        ("-Wl,-soname=" + base_name + ".so"),
+                                        "-shared", base_name + ".FAKE.c",
+-                                       "-o", base_name + ".FAKE.so"]
++                                       "-o", src_base_name + ".FAKE.so"]
+                                 run_cmd(cmd)
+                                 fake_created[dep] = True
+                 dso_deps = map(lambda d: testpfx + test_name + "-" + d + ".so",
+@@ -932,7 +953,7 @@ def process_testcase(t):
+             main_deps = map(lambda d: testpfx + test_name + "-" + d + ".so",
+                             deps)
+             cmd = [build_gcc, "-Wl,--no-as-needed", "-o", testpfx + test_name,
+-                   testpfx + test_name + ".c", "-L%s" % (os.getcwd()),
++                   testpfx_src + test_name + ".c", "-L%s" % (os.getcwd()),
+                    "-Wl,-rpath-link=%s" % (os.getcwd())]
+             if '#' in test_descr.soname_map:
+                 soname = ("-Wl,-soname=" + testpfx + test_name + "-"
+@@ -987,14 +1008,14 @@ def process_testcase(t):
+         sfx = ""
+         if r[0] != "":
+             sfx = "-" + r[0].replace("=","_")
+-        f = open(testpfx + t.test_name + sfx + ".exp", "w")
++        f = open(testpfx_src + t.test_name + sfx + ".exp", "w")
+         (output, xfail) = r[1]
+         f.write('%s' % output)
+         f.close()
+ 
+     # Create header part of top-level testcase shell script, to wrap execution
+     # and output comparison together.
+-    t.sh = open(testpfx + t.test_name + ".sh", "w")
++    t.sh = open(testpfx_src + t.test_name + ".sh", "w")
+     t.sh.write("#!/bin/sh\n")
+     t.sh.write("# Test driver for %s, generated by "
+                 "dso-ordering-test.py\n" % (t.test_name))
+@@ -1022,12 +1043,12 @@ def process_testcase(t):
+         sfx = ""
+         if r[0] != "":
+             sfx = "-" + r[0].replace("=","_")
+-        expected_output_files += " $(objpfx)%s/%s%s.exp" % (test_subdir,
++        expected_output_files += " $(objpfx)%s%s%s.exp" % (test_srcdir,
+                                                             t.test_name, sfx)
+     makefile.write \
+-    ("$(objpfx)%s.out: $(objpfx)%s/%s.sh%s "
++    ("$(objpfx)%s.out: $(objpfx)%s%s.sh%s "
+      "$(common-objpfx)support/test-run-command\n"
+-     % (t.test_name, test_subdir, t.test_name,
++     % (t.test_name, test_srcdir, t.test_name,
+         expected_output_files))
+     makefile.write("\t$(SHELL) $< $(common-objpfx) '$(test-wrapper-env)' "
+                     "'$(run-program-env)' > $@; $(evaluate-test)\n")
diff --git a/SOURCES/glibc-rh1159809-3.patch b/SOURCES/glibc-rh1159809-3.patch
new file mode 100644
index 0000000..fa39efe
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-3.patch
@@ -0,0 +1,589 @@
+commit 15a0c5730d1d5aeb95f50c9ec7470640084feae8
+Author: Chung-Lin Tang <cltang@codesourcery.com>
+Date:   Thu Oct 21 21:41:22 2021 +0800
+
+    elf: Fix slow DSO sorting behavior in dynamic loader (BZ #17645)
+    
+    This second patch contains the actual implementation of a new sorting algorithm
+    for shared objects in the dynamic loader, which solves the slow behavior that
+    the current "old" algorithm falls into when the DSO set contains circular
+    dependencies.
+    
+    The new algorithm implemented here is simply depth-first search (DFS) to obtain
+    the Reverse-Post Order (RPO) sequence, a topological sort. A new l_visited:1
+    bitfield is added to struct link_map to more elegantly facilitate such a search.
+    
+    The DFS algorithm is applied to the input maps[nmap-1] backwards towards
+    maps[0]. This has the effect of a more "shallow" recursion depth in general
+    since the input is in BFS. Also, when combined with the natural order of
+    processing l_initfini[] at each node, this creates a resulting output sorting
+    closer to the intuitive "left-to-right" order in most cases.
+    
+    Another notable implementation adjustment related to this _dl_sort_maps change
+    is the removal of two char arrays 'used' and 'done' in _dl_close_worker to
+    represent two per-map attributes. This has been changed to simply use two new
+    bit-fields l_map_used:1, l_map_done:1 added to struct link_map. This also allows
+    discarding the clunky 'used' array sorting that _dl_sort_maps had to sometimes
+    do along the way.
+    
+    Tunable support for switching between different sorting algorithms at runtime is
+    also added. A new tunable 'glibc.rtld.dynamic_sort' with current valid values 1
+    (old algorithm) and 2 (new DFS algorithm) has been added. At time of commit
+    of this patch, the default setting is 1 (old algorithm).
+    
+    Signed-off-by: Chung-Lin Tang  <cltang@codesourcery.com>
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+Conflicts:
+	elf/dl-tunables.list
+	  (No mem.tagging tunable downstream.)
+
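+To illustrate the approach described above: the following is a minimal,
+self-contained C sketch of the reverse-postorder construction -- a DFS that
+decrements a head pointer before each store, with the top level iterated
+backwards from the last entry.  The types and names used here (struct node,
+dfs, rpo_head) are simplified stand-ins for illustration only, not glibc's
+struct link_map or dfs_traversal.
+
+    /* Build a reverse-postorder (topological) ordering of a small
+       dependency graph, mirroring the first-decrement-then-store scheme
+       described in the commit message.  */
+    #include <assert.h>
+    #include <stddef.h>
+    #include <stdio.h>
+
+    struct node
+    {
+      const char *name;
+      struct node **deps;   /* NULL-terminated, like l_initfini.  */
+      int visited;          /* plays the role of the l_visited:1 bit.  */
+    };
+
+    static void
+    dfs (struct node ***rpo_head, struct node *n)
+    {
+      if (n->visited)
+        return;
+      n->visited = 1;
+      if (n->deps != NULL)
+        for (int i = 0; n->deps[i] != NULL; i++)
+          dfs (rpo_head, n->deps[i]);
+      /* First decrement, then store: on completion the head points at the
+         start of the finished RPO sequence.  */
+      *rpo_head -= 1;
+      **rpo_head = n;
+    }
+
+    int
+    main (void)
+    {
+      struct node c = { "c", NULL, 0 };
+      struct node *b_deps[] = { &c, NULL };
+      struct node b = { "b", b_deps, 0 };
+      struct node *a_deps[] = { &b, &c, NULL };
+      struct node a = { "a", a_deps, 0 };
+
+      struct node *maps[] = { &a, &b, &c };
+      size_t nmaps = 3;
+      struct node *rpo[3];
+      struct node **rpo_head = &rpo[nmaps];
+
+      /* Traverse backwards, as the patch does for maps[nmaps-1]..maps[0].  */
+      for (int i = nmaps - 1; i >= 0; i--)
+        dfs (&rpo_head, maps[i]);
+      assert (rpo_head == rpo);
+
+      for (size_t i = 0; i < nmaps; i++)
+        printf ("%s ", rpo[i]->name);   /* prints "a b c": dependents first  */
+      putchar ('\n');
+      return 0;
+    }
+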
+diff --git a/elf/dl-close.c b/elf/dl-close.c
+index 74ca9a85dd309780..22225efb3226c3e1 100644
+--- a/elf/dl-close.c
++++ b/elf/dl-close.c
+@@ -167,8 +167,6 @@ _dl_close_worker (struct link_map *map, bool force)
+ 
+   bool any_tls = false;
+   const unsigned int nloaded = ns->_ns_nloaded;
+-  char used[nloaded];
+-  char done[nloaded];
+   struct link_map *maps[nloaded];
+ 
+   /* Run over the list and assign indexes to the link maps and enter
+@@ -176,24 +174,21 @@ _dl_close_worker (struct link_map *map, bool force)
+   int idx = 0;
+   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
+     {
++      l->l_map_used = 0;
++      l->l_map_done = 0;
+       l->l_idx = idx;
+       maps[idx] = l;
+       ++idx;
+-
+     }
+   assert (idx == nloaded);
+ 
+-  /* Prepare the bitmaps.  */
+-  memset (used, '\0', sizeof (used));
+-  memset (done, '\0', sizeof (done));
+-
+   /* Keep track of the lowest index link map we have covered already.  */
+   int done_index = -1;
+   while (++done_index < nloaded)
+     {
+       struct link_map *l = maps[done_index];
+ 
+-      if (done[done_index])
++      if (l->l_map_done)
+ 	/* Already handled.  */
+ 	continue;
+ 
+@@ -204,12 +199,12 @@ _dl_close_worker (struct link_map *map, bool force)
+ 	  /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
+ 	     acquire is sufficient and correct.  */
+ 	  && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+-	  && !used[done_index])
++	  && !l->l_map_used)
+ 	continue;
+ 
+       /* We need this object and we handle it now.  */
+-      done[done_index] = 1;
+-      used[done_index] = 1;
++      l->l_map_used = 1;
++      l->l_map_done = 1;
+       /* Signal the object is still needed.  */
+       l->l_idx = IDX_STILL_USED;
+ 
+@@ -225,9 +220,9 @@ _dl_close_worker (struct link_map *map, bool force)
+ 		{
+ 		  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
+ 
+-		  if (!used[(*lp)->l_idx])
++		  if (!(*lp)->l_map_used)
+ 		    {
+-		      used[(*lp)->l_idx] = 1;
++		      (*lp)->l_map_used = 1;
+ 		      /* If we marked a new object as used, and we've
+ 			 already processed it, then we need to go back
+ 			 and process again from that point forward to
+@@ -250,9 +245,9 @@ _dl_close_worker (struct link_map *map, bool force)
+ 	      {
+ 		assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
+ 
+-		if (!used[jmap->l_idx])
++		if (!jmap->l_map_used)
+ 		  {
+-		    used[jmap->l_idx] = 1;
++		    jmap->l_map_used = 1;
+ 		    if (jmap->l_idx - 1 < done_index)
+ 		      done_index = jmap->l_idx - 1;
+ 		  }
+@@ -262,8 +257,7 @@ _dl_close_worker (struct link_map *map, bool force)
+ 
+   /* Sort the entries.  We can skip looking for the binary itself which is
+      at the front of the search list for the main namespace.  */
+-  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
+-		 used + (nsid == LM_ID_BASE), true);
++  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
+ 
+   /* Call all termination functions at once.  */
+   bool unload_any = false;
+@@ -277,7 +271,7 @@ _dl_close_worker (struct link_map *map, bool force)
+       /* All elements must be in the same namespace.  */
+       assert (imap->l_ns == nsid);
+ 
+-      if (!used[i])
++      if (!imap->l_map_used)
+ 	{
+ 	  assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+ 
+@@ -315,7 +309,7 @@ _dl_close_worker (struct link_map *map, bool force)
+ 	  if (i < first_loaded)
+ 	    first_loaded = i;
+ 	}
+-      /* Else used[i].  */
++      /* Else imap->l_map_used.  */
+       else if (imap->l_type == lt_loaded)
+ 	{
+ 	  struct r_scope_elem *new_list = NULL;
+@@ -524,7 +518,7 @@ _dl_close_worker (struct link_map *map, bool force)
+   for (unsigned int i = first_loaded; i < nloaded; ++i)
+     {
+       struct link_map *imap = maps[i];
+-      if (!used[i])
++      if (!imap->l_map_used)
+ 	{
+ 	  assert (imap->l_type == lt_loaded);
+ 
+diff --git a/elf/dl-deps.c b/elf/dl-deps.c
+index 007069f670eced95..9365d54c8e03e5f4 100644
+--- a/elf/dl-deps.c
++++ b/elf/dl-deps.c
+@@ -612,10 +612,9 @@ Filters not supported with LD_TRACE_PRELINKING"));
+ 
+   /* If libc.so.6 is the main map, it participates in the sort, so
+      that the relocation order is correct regarding libc.so.6.  */
+-  if (l_initfini[0] == GL (dl_ns)[l_initfini[0]->l_ns].libc_map)
+-    _dl_sort_maps (l_initfini, nlist, NULL, false);
+-  else
+-    _dl_sort_maps (&l_initfini[1], nlist - 1, NULL, false);
++  _dl_sort_maps (l_initfini, nlist,
++		 (l_initfini[0] != GL (dl_ns)[l_initfini[0]->l_ns].libc_map),
++		 false);
+ 
+   /* Terminate the list of dependencies.  */
+   l_initfini[nlist] = NULL;
+diff --git a/elf/dl-fini.c b/elf/dl-fini.c
+index eea9d8aad736a99e..e14259a3c8806e0d 100644
+--- a/elf/dl-fini.c
++++ b/elf/dl-fini.c
+@@ -95,8 +95,7 @@ _dl_fini (void)
+ 	  /* Now we have to do the sorting.  We can skip looking for the
+ 	     binary itself which is at the front of the search list for
+ 	     the main namespace.  */
+-	  _dl_sort_maps (maps + (ns == LM_ID_BASE), nmaps - (ns == LM_ID_BASE),
+-			 NULL, true);
++	  _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
+ 
+ 	  /* We do not rely on the linked list of loaded object anymore
+ 	     from this point on.  We have our own list here (maps).  The
+diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
+index b2a01ede627be1e9..398a08f28c4d9ff1 100644
+--- a/elf/dl-sort-maps.c
++++ b/elf/dl-sort-maps.c
+@@ -16,16 +16,24 @@
+    License along with the GNU C Library; if not, see
+    <http://www.gnu.org/licenses/>.  */
+ 
++#include <assert.h>
+ #include <ldsodefs.h>
++#include <elf/dl-tunables.h>
+ 
++/* Note: this is the older, "original" sorting algorithm, being used as
++   default up to 2.35.
+ 
+-/* Sort array MAPS according to dependencies of the contained objects.
+-   Array USED, if non-NULL, is permutated along MAPS.  If FOR_FINI this is
+-   called for finishing an object.  */
+-void
+-_dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
+-	       bool for_fini)
++   Sort array MAPS according to dependencies of the contained objects.
++   If FOR_FINI is true, this is called for finishing an object.  */
++static void
++_dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
++			unsigned int skip, bool for_fini)
+ {
++  /* Allows caller to do the common optimization of skipping the first map,
++     usually the main binary.  */
++  maps += skip;
++  nmaps -= skip;
++
+   /* A list of one element need not be sorted.  */
+   if (nmaps <= 1)
+     return;
+@@ -66,14 +74,6 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
+ 			   (k - i) * sizeof (maps[0]));
+ 		  maps[k] = thisp;
+ 
+-		  if (used != NULL)
+-		    {
+-		      char here_used = used[i];
+-		      memmove (&used[i], &used[i + 1],
+-			       (k - i) * sizeof (used[0]));
+-		      used[k] = here_used;
+-		    }
+-
+ 		  if (seen[i + 1] > nmaps - i)
+ 		    {
+ 		      ++i;
+@@ -120,3 +120,183 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps, char *used,
+     next:;
+     }
+ }
++
++#if !HAVE_TUNABLES
++/* In this case, just default to the original algorithm.  */
++strong_alias (_dl_sort_maps_original, _dl_sort_maps);
++#else
++
++/* We use a recursive function due to its better clarity and ease of
++   implementation, as well as faster execution speed. We already use
++   alloca() for list allocation during the breadth-first search of
++   dependencies in _dl_map_object_deps(), and this should be on the
++   same order of worst-case stack usage.
++
++   Note: the '*rpo' parameter is supposed to point to one past the
++   last element of the array where we save the sort results, and is
++   decremented before storing the current map at each level.  */
++
++static void
++dfs_traversal (struct link_map ***rpo, struct link_map *map,
++	       bool *do_reldeps)
++{
++  if (map->l_visited)
++    return;
++
++  map->l_visited = 1;
++
++  if (map->l_initfini)
++    {
++      for (int i = 0; map->l_initfini[i] != NULL; i++)
++	{
++	  struct link_map *dep = map->l_initfini[i];
++	  if (dep->l_visited == 0
++	      && dep->l_main_map == 0)
++	    dfs_traversal (rpo, dep, do_reldeps);
++	}
++    }
++
++  if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
++    {
++      /* Indicate that we encountered relocation dependencies during
++	 traversal.  */
++      *do_reldeps = true;
++
++      for (int m = map->l_reldeps->act - 1; m >= 0; m--)
++	{
++	  struct link_map *dep = map->l_reldeps->list[m];
++	  if (dep->l_visited == 0
++	      && dep->l_main_map == 0)
++	    dfs_traversal (rpo, dep, do_reldeps);
++	}
++    }
++
++  *rpo -= 1;
++  **rpo = map;
++}
++
++/* Topologically sort array MAPS according to dependencies of the contained
++   objects.  */
++
++static void
++_dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
++		   unsigned int skip __attribute__ ((unused)), bool for_fini)
++{
++  for (int i = nmaps - 1; i >= 0; i--)
++    maps[i]->l_visited = 0;
++
++  /* We apply DFS traversal for each of maps[i] until the whole total order
++     is found and we're at the start of the Reverse-Postorder (RPO) sequence,
++     which is a topological sort.
++
++     We go from maps[nmaps - 1] backwards towards maps[0] at this level.
++     Due to the breadth-first search (BFS) ordering we receive, going
++     backwards usually gives a more shallow depth-first recursion depth,
++     adding more stack usage safety. Also, combined with the natural
++     processing order of l_initfini[] at each node during DFS, this maintains
++     an ordering closer to the original link ordering in the sorting results
++     under most simpler cases.
++
++     Another reason we order the top level backwards is that maps[0] is
++     usually exactly the main object of which we're in the midst of
++     _dl_map_object_deps() processing, and maps[0]->l_initfini[] is still
++     blank. If we start the traversal from maps[0], since having no
++     dependencies yet filled in, maps[0] will always be immediately
++     incorrectly placed at the last place in the order (first in reverse).
++     Adjusting the order so that maps[0] is last traversed naturally avoids
++     this problem.
++
++     Further, the old "optimization" of skipping the main object at maps[0]
++     from the call-site (i.e. _dl_sort_maps(maps+1,nmaps-1)) is in general
++     no longer valid, since traversing along object dependency-links
++     may "find" the main object even when it is not included in the initial
++     order (e.g. a dlopen()'ed shared object can have circular dependencies
++     linked back to itself). In such a case, traversing N-1 objects will
++     create a N-object result, and raise problems.
++
++     To summarize, just passing in the full list, and iterating from back
++     to front makes things much more straightforward.  */
++
++  /* Array to hold RPO sorting results, before we copy back to maps[].  */
++  struct link_map *rpo[nmaps];
++
++  /* The 'head' position during each DFS iteration. Note that we start at
++     one past the last element due to first-decrement-then-store (see the
++     bottom of above dfs_traversal() routine).  */
++  struct link_map **rpo_head = &rpo[nmaps];
++
++  bool do_reldeps = false;
++  bool *do_reldeps_ref = (for_fini ? &do_reldeps : NULL);
++
++  for (int i = nmaps - 1; i >= 0; i--)
++    {
++      dfs_traversal (&rpo_head, maps[i], do_reldeps_ref);
++
++      /* We can break early if all objects are already placed.  */
++      if (rpo_head == rpo)
++	goto end;
++    }
++  assert (rpo_head == rpo);
++
++ end:
++  /* Here we may do a second pass of sorting, using only l_initfini[]
++     static dependency links. This is avoided if !FOR_FINI or if we didn't
++     find any reldeps in the first DFS traversal.
++
++     The reason we do this is: while it is unspecified how circular
++     dependencies should be handled, the presumed reasonable behavior is to
++     have destructors to respect static dependency links as much as possible,
++     overriding reldeps if needed. And the first sorting pass, which takes
++     l_initfini/l_reldeps links equally, may not preserve this priority.
++
++     Hence we do a 2nd sorting pass, taking only DT_NEEDED links into account
++     (see how the do_reldeps argument to dfs_traversal() is NULL below).  */
++  if (do_reldeps)
++    {
++      for (int i = nmaps - 1; i >= 0; i--)
++	rpo[i]->l_visited = 0;
++
++      struct link_map **maps_head = &maps[nmaps];
++      for (int i = nmaps - 1; i >= 0; i--)
++	{
++	  dfs_traversal (&maps_head, rpo[i], NULL);
++
++	  /* We can break early if all objects are already placed.
++	     The below memcpy is not needed in the do_reldeps case here,
++	     since we wrote back to maps[] during DFS traversal.  */
++	  if (maps_head == maps)
++	    return;
++	}
++      assert (maps_head == maps);
++      return;
++    }
++
++  memcpy (maps, rpo, sizeof (struct link_map *) * nmaps);
++}
++
++void
++_dl_sort_maps_init (void)
++{
++  int32_t algorithm = TUNABLE_GET (glibc, rtld, dynamic_sort, int32_t, NULL);
++  GLRO(dl_dso_sort_algo) = algorithm == 1 ? dso_sort_algorithm_original
++					  : dso_sort_algorithm_dfs;
++}
++
++void
++_dl_sort_maps (struct link_map **maps, unsigned int nmaps,
++	       unsigned int skip, bool for_fini)
++{
++  /* It can be tempting to use a static function pointer to store and call
++     the current selected sorting algorithm routine, but experimentation
++     shows that current processors still do not handle indirect branches
++     that efficiently, plus a static function pointer will involve
++     PTR_MANGLE/DEMANGLE, further impairing performance of small, common
++     input cases. A simple if-case with direct function calls appears to
++     be the fastest.  */
++  if (__glibc_likely (GLRO(dl_dso_sort_algo) == dso_sort_algorithm_original))
++    _dl_sort_maps_original (maps, nmaps, skip, for_fini);
++  else
++    _dl_sort_maps_dfs (maps, nmaps, skip, for_fini);
++}
++
++#endif /* HAVE_TUNABLES.  */
+diff --git a/elf/dl-support.c b/elf/dl-support.c
+index e9943e889ef447ad..ae03aec9764e29d3 100644
+--- a/elf/dl-support.c
++++ b/elf/dl-support.c
+@@ -155,6 +155,8 @@ size_t _dl_phnum;
+ uint64_t _dl_hwcap __attribute__ ((nocommon));
+ uint64_t _dl_hwcap2 __attribute__ ((nocommon));
+ 
++enum dso_sort_algorithm _dl_dso_sort_algo;
++
+ /* The value of the FPU control word the kernel will preset in hardware.  */
+ fpu_control_t _dl_fpu_control = _FPU_DEFAULT;
+ 
+diff --git a/elf/dl-sysdep.c b/elf/dl-sysdep.c
+index 998c5d52bcab8193..4e8a986541fc4c09 100644
+--- a/elf/dl-sysdep.c
++++ b/elf/dl-sysdep.c
+@@ -223,6 +223,9 @@ _dl_sysdep_start (void **start_argptr,
+ 
+   __tunables_init (_environ);
+ 
++  /* Initialize DSO sorting algorithm after tunables.  */
++  _dl_sort_maps_init ();
++
+ #ifdef DL_SYSDEP_INIT
+   DL_SYSDEP_INIT;
+ #endif
+diff --git a/elf/dl-tunables.list b/elf/dl-tunables.list
+index 6408a8e5ae92d2c6..54ef2a921310b229 100644
+--- a/elf/dl-tunables.list
++++ b/elf/dl-tunables.list
+@@ -140,4 +140,13 @@ glibc {
+       default: 512
+     }
+   }
++
++  rtld {
++    dynamic_sort {
++      type: INT_32
++      minval: 1
++      maxval: 2
++      default: 1
++    }
++  }
+ }
+diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
+index 873ddf55d91155c6..5f7f18ef270bc12d 100644
+--- a/elf/dso-sort-tests-1.def
++++ b/elf/dso-sort-tests-1.def
+@@ -62,5 +62,5 @@ output: b>a>{}<a<b
+ # The below expected outputs are what the two algorithms currently produce
+ # respectively, for regression testing purposes.
+ tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
+-xfail_output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
++output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
+ output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
+diff --git a/elf/rtld.c b/elf/rtld.c
+index b47e84ca2fb6f03c..cd2cc4024a3581c2 100644
+--- a/elf/rtld.c
++++ b/elf/rtld.c
+@@ -1453,6 +1453,9 @@ dl_main (const ElfW(Phdr) *phdr,
+       main_map->l_name = (char *) "";
+       *user_entry = main_map->l_entry;
+ 
++      /* Set bit indicating this is the main program map.  */
++      main_map->l_main_map = 1;
++
+ #ifdef HAVE_AUX_VECTOR
+       /* Adjust the on-stack auxiliary vector so that it looks like the
+ 	 binary was executed directly.  */
+diff --git a/elf/tst-rtld-list-tunables.exp b/elf/tst-rtld-list-tunables.exp
+index 4f3f7ee4e30a2b42..118afc271057afd4 100644
+--- a/elf/tst-rtld-list-tunables.exp
++++ b/elf/tst-rtld-list-tunables.exp
+@@ -10,5 +10,6 @@ glibc.malloc.tcache_max: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.tcache_unsorted_limit: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.trim_threshold: 0x0 (min: 0x0, max: 0x[f]+)
++glibc.rtld.dynamic_sort: 1 (min: 1, max: 2)
+ glibc.rtld.nns: 0x4 (min: 0x1, max: 0x10)
+ glibc.rtld.optional_static_tls: 0x200 (min: 0x0, max: 0x[f]+)
+diff --git a/include/link.h b/include/link.h
+index dd491989beb41353..041ff5f753a9ee11 100644
+--- a/include/link.h
++++ b/include/link.h
+@@ -181,6 +181,11 @@ struct link_map
+     unsigned int l_init_called:1; /* Nonzero if DT_INIT function called.  */
+     unsigned int l_global:1;	/* Nonzero if object in _dl_global_scope.  */
+     unsigned int l_reserved:2;	/* Reserved for internal use.  */
++    unsigned int l_main_map:1;  /* Nonzero for the map of the main program.  */
++    unsigned int l_visited:1;   /* Used internally for map dependency
++				   graph traversal.  */
++    unsigned int l_map_used:1;  /* These two bits are used during traversal */
++    unsigned int l_map_done:1;  /* of maps in _dl_close_worker. */
+     unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
+ 					to by `l_phdr' is allocated.  */
+     unsigned int l_soname_added:1; /* Nonzero if the SONAME is for sure in
+diff --git a/manual/tunables.texi b/manual/tunables.texi
+index 43272cf885d1e3e6..c3f96cdc85208926 100644
+--- a/manual/tunables.texi
++++ b/manual/tunables.texi
+@@ -303,6 +303,17 @@ changed once allocated at process startup.  The default allocation of
+ optional static TLS is 512 bytes and is allocated in every thread.
+ @end deftp
+ 
++@deftp Tunable glibc.rtld.dynamic_sort
++Sets the algorithm to use for DSO sorting; valid values are @samp{1} and
++@samp{2}.  For a value of @samp{1}, an older O(n^3) algorithm is used, which
++has been tested for a long time, but may have performance issues when
++dependencies between shared objects contain cycles.  When set to the
++value of @samp{2}, a different algorithm is used, which implements a
++topological sort through depth-first search, and does not exhibit the
++performance issues of @samp{1}.
++
++The default value of this tunable is @samp{1}.
++@end deftp
+ 
+ @node Elision Tunables
+ @section Elision Tunables
+diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
+index 5e56550a4d556fa7..9f09a4a280396659 100644
+--- a/sysdeps/generic/ldsodefs.h
++++ b/sysdeps/generic/ldsodefs.h
+@@ -240,6 +240,13 @@ enum allowmask
+   };
+ 
+ 
++/* DSO sort algorithm to use (check dl-sort-maps.c).  */
++enum dso_sort_algorithm
++  {
++    dso_sort_algorithm_original,
++    dso_sort_algorithm_dfs
++  };
++
+ struct audit_ifaces
+ {
+   void (*activity) (uintptr_t *, unsigned int);
+@@ -633,6 +640,8 @@ struct rtld_global_ro
+      platforms.  */
+   EXTERN uint64_t _dl_hwcap2;
+ 
++  EXTERN enum dso_sort_algorithm _dl_dso_sort_algo;
++
+ #ifdef SHARED
+   /* We add a function table to _rtld_global which is then used to
+      call the function instead of going through the PLT.  The result
+@@ -1049,7 +1058,7 @@ extern void _dl_fini (void) attribute_hidden;
+ 
+ /* Sort array MAPS according to dependencies of the contained objects.  */
+ extern void _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+-			   char *used, bool for_fini) attribute_hidden;
++			   unsigned int skip, bool for_fini) attribute_hidden;
+ 
+ /* The dynamic linker calls this function before and having changing
+    any shared object mappings.  The `r_state' member of `struct r_debug'
+@@ -1167,6 +1176,9 @@ extern struct link_map * _dl_get_dl_main_map (void)
+ # endif
+ #endif
+ 
++/* Initialize the DSO sort algorithm to use.  */
++extern void _dl_sort_maps_init (void) attribute_hidden;
++
+ /* Initialization of libpthread for statically linked applications.
+    If libpthread is not linked in, this is an empty function.  */
+ void __pthread_initialize_minimal (void) weak_function;
diff --git a/SOURCES/glibc-rh1159809-4.patch b/SOURCES/glibc-rh1159809-4.patch
new file mode 100644
index 0000000..e47b934
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-4.patch
@@ -0,0 +1,25 @@
+commit d3bf2f5927d51258a51ac7fde04f4805f8ee294a
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Wed Nov 3 09:19:30 2021 -0300
+
+    elf: Do not run DSO sorting if tunables is not enabled
+    
+    Since the algorithm selection requires tunables.
+    
+    Checked on x86_64-linux-gnu with --enable-tunables=no.
+
+diff --git a/elf/Makefile b/elf/Makefile
+index e92f62f279566684..3b5e1f59e6696a2b 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -998,8 +998,10 @@ include $(objpfx)$(1).generated-makefile
+ endef
+ 
+ # Generate from each testcase description file
++ifeq (yes,$(have-tunables))
+ $(eval $(call include_dsosort_tests,dso-sort-tests-1.def))
+ $(eval $(call include_dsosort_tests,dso-sort-tests-2.def))
++endif
+ 
+ check-abi: $(objpfx)check-abi-ld.out
+ tests-special += $(objpfx)check-abi-ld.out
diff --git a/SOURCES/glibc-rh1159809-5.patch b/SOURCES/glibc-rh1159809-5.patch
new file mode 100644
index 0000000..dcd7703
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-5.patch
@@ -0,0 +1,45 @@
+commit 1f67d8286b5da9266a138198ef1f15c27cbb0010
+Author: H.J. Lu <hjl.tools@gmail.com>
+Date:   Mon Nov 15 16:28:39 2021 -0800
+
+    elf: Use a temporary file to generate Makefile fragments [BZ #28550]
+    
+    1. Use a temporary file to generate Makefile fragments for DSO sorting
+    tests and use -include on them.
+    2. Add Makefile fragments to postclean-generated so that a "make clean"
+    removes the autogenerated fragments and a subsequent "make" regenerates
+    them.
+    
+    This partially fixes BZ #28550.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/Makefile b/elf/Makefile
+index 3b5e1f59e6696a2b..22a8060f7d3bb1a1 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -986,6 +986,7 @@ tests-special += \
+   # tests-special
+ endif
+ 
++ifndef avoid-generated
+ # DSO sorting tests:
+ # The dso-ordering-test.py script generates testcase source files in $(objpfx),
+ # creating a $(objpfx)<testcase-name>-dir for each testcase, and creates a
+@@ -993,9 +994,14 @@ endif
+ define include_dsosort_tests
+ $(objpfx)$(1).generated-makefile: $(1)
+ 	$(PYTHON) $(..)scripts/dso-ordering-test.py \
+-	--description-file $$< --objpfx $(objpfx) --output-makefile $$@
+-include $(objpfx)$(1).generated-makefile
++	--description-file $$< --objpfx $(objpfx) --output-makefile $$@T
++	mv $$@T $$@
++-include $(objpfx)$(1).generated-makefile
+ endef
++endif
++
++postclean-generated += $(objpfx)/dso-sort-tests-1.generated-makefile \
++		       $(objpfx)/dso-sort-tests-2.generated-makefile
+ 
+ # Generate from each testcase description file
+ ifeq (yes,$(have-tunables))
diff --git a/SOURCES/glibc-rh1159809-6.patch b/SOURCES/glibc-rh1159809-6.patch
new file mode 100644
index 0000000..df6d0e2
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-6.patch
@@ -0,0 +1,49 @@
+commit 0884724a95b60452ad483dbe086d237d02ba624d
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Dec 14 12:37:44 2021 +0100
+
+    elf: Use new dependency sorting algorithm by default
+    
+    The default has to change eventually, and there are no known failures
+    that require a delay.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/dl-tunables.list b/elf/dl-tunables.list
+index 54ef2a921310b229..f11ca5b3e8b09b43 100644
+--- a/elf/dl-tunables.list
++++ b/elf/dl-tunables.list
+@@ -146,7 +146,7 @@ glibc {
+       type: INT_32
+       minval: 1
+       maxval: 2
+-      default: 1
++      default: 2
+     }
+   }
+ }
+diff --git a/elf/tst-rtld-list-tunables.exp b/elf/tst-rtld-list-tunables.exp
+index 118afc271057afd4..478ee8ab091685eb 100644
+--- a/elf/tst-rtld-list-tunables.exp
++++ b/elf/tst-rtld-list-tunables.exp
+@@ -10,6 +10,6 @@ glibc.malloc.tcache_max: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.tcache_unsorted_limit: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0x[f]+)
+ glibc.malloc.trim_threshold: 0x0 (min: 0x0, max: 0x[f]+)
+-glibc.rtld.dynamic_sort: 1 (min: 1, max: 2)
++glibc.rtld.dynamic_sort: 2 (min: 1, max: 2)
+ glibc.rtld.nns: 0x4 (min: 0x1, max: 0x10)
+ glibc.rtld.optional_static_tls: 0x200 (min: 0x0, max: 0x[f]+)
+diff --git a/manual/tunables.texi b/manual/tunables.texi
+index c3f96cdc85208926..7b70e80391ee87f7 100644
+--- a/manual/tunables.texi
++++ b/manual/tunables.texi
+@@ -312,7 +312,7 @@ value of @samp{2}, a different algorithm is used, which implements a
+ topological sort through depth-first search, and does not exhibit the
+ performance issues of @samp{1}.
+ 
+-The default value of this tunable is @samp{1}.
++The default value of this tunable is @samp{2}.
+ @end deftp
+ 
+ @node Elision Tunables
diff --git a/SOURCES/glibc-rh1159809-7.patch b/SOURCES/glibc-rh1159809-7.patch
new file mode 100644
index 0000000..c396a5c
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-7.patch
@@ -0,0 +1,357 @@
+commit 3a0588ae48fb35384a6bd33f9b66403badfa1262
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Tue Feb 8 15:22:49 2022 -0300
+
+    elf: Fix DFS sorting algorithm for LD_TRACE_LOADED_OBJECTS with missing libraries (BZ #28868)
+
+    On _dl_map_object the underlying file is not opened in trace mode
+    (in other cases where the underlying file can't be opened,
+    _dl_map_object quits with an error).  If there are any missing libraries
+    being processed, they will not be considered in the final nlist size
+    passed to _dl_sort_maps later in the function.  And it is then used by
+    _dl_sort_maps_dfs on the stack allocated working maps:
+
+    222   /* Array to hold RPO sorting results, before we copy back to  maps[].  */
+    223   struct link_map *rpo[nmaps];
+    224
+    225   /* The 'head' position during each DFS iteration. Note that we start at
+    226      one past the last element due to first-decrement-then-store (see the
+    227      bottom of above dfs_traversal() routine).  */
+    228   struct link_map **rpo_head = &rpo[nmaps];
+
+    However, while traversing the 'l_initfini' in dfs_traversal it will
+    still consider the l_faked maps and thus update rpo more times than the
+    allocated working 'rpo', overflowing the stack object.
+
+    As suggested in bugzilla, one option would be to avoid sorting the maps
+    for trace mode.  However, I think ignoring l_faked objects does make
+    sense (there is one less constraint on calling the sorting function), it
+    allows slightly less stack usage for trace, and it is a slightly simpler
+    solution.
+
+    The tests do trigger the stack overflow; however, I tried to make
+    them more generic to check different scenarios of missing objects.
+
+    Checked on x86_64-linux-gnu.
+
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	elf/Makefile
+	  (differences in backported tests)
+
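+The size mismatch described above can be pictured with a small,
+self-contained C sketch: the result array is sized from a count that
+excludes faked entries, so the traversal has to skip them as well, or the
+head pointer walks past the start of the array.  The types and names used
+here (struct obj, visit) are simplified stand-ins for illustration, not
+glibc's link_map or dfs_traversal.
+
+    #include <assert.h>
+    #include <stddef.h>
+
+    struct obj
+    {
+      int faked;     /* stands in for link_map::l_faked    */
+      int visited;   /* stands in for link_map::l_visited  */
+    };
+
+    static void
+    visit (struct obj ***head, struct obj *o)
+    {
+      /* The fix: bail out for faked objects, exactly as for visited ones.  */
+      if (o->visited || o->faked)
+        return;
+      o->visited = 1;
+      *head -= 1;
+      **head = o;
+    }
+
+    int
+    main (void)
+    {
+      struct obj objs[3] = { { 0, 0 }, { 1, 0 }, { 0, 0 } };  /* one faked  */
+
+      /* nlist counts only non-faked objects, as _dl_map_object_deps does.  */
+      size_t nlist = 2;
+      struct obj *rpo[2];
+      struct obj **head = &rpo[nlist];
+
+      for (int i = 2; i >= 0; i--)
+        visit (&head, &objs[i]);
+
+      /* Without the o->faked check above, head would end up below rpo[0],
+         i.e. the on-stack array would be overrun.  */
+      assert (head == rpo);
+      return 0;
+    }
+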
+diff --git a/elf/Makefile b/elf/Makefile
+index 22a8060f7d3bb1a1..634c3113227d64a6 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -584,6 +584,11 @@ modules-names = \
+   libmarkermod5-3 \
+   libmarkermod5-4 \
+   libmarkermod5-5 \
++  libtracemod1-1 \
++  libtracemod2-1 \
++  libtracemod3-1 \
++  libtracemod4-1 \
++  libtracemod5-1 \
+   ltglobmod1 \
+   ltglobmod2 \
+   neededobj1 \
+@@ -983,6 +988,11 @@ tests-special += \
+   $(objpfx)tst-initorder2-cmp.out \
+   $(objpfx)tst-unused-dep-cmp.out \
+   $(objpfx)tst-unused-dep.out \
++  $(objpfx)tst-trace1.out \
++  $(objpfx)tst-trace2.out \
++  $(objpfx)tst-trace3.out \
++  $(objpfx)tst-trace4.out \
++  $(objpfx)tst-trace5.out \
+   # tests-special
+ endif
+ 
+@@ -2619,6 +2629,51 @@ $(objpfx)tst-rtld-run-static.out: $(objpfx)/ldconfig
+ 
+ $(objpfx)tst-dlmopen-gethostbyname: $(libdl)
+ $(objpfx)tst-dlmopen-gethostbyname.out: $(objpfx)tst-dlmopen-gethostbyname-mod.so
++
++LDFLAGS-libtracemod1-1.so += -Wl,-soname,libtracemod1.so
++LDFLAGS-libtracemod2-1.so += -Wl,-soname,libtracemod2.so
++LDFLAGS-libtracemod3-1.so += -Wl,-soname,libtracemod3.so
++LDFLAGS-libtracemod4-1.so += -Wl,-soname,libtracemod4.so
++LDFLAGS-libtracemod5-1.so += -Wl,-soname,libtracemod5.so
++
++$(objpfx)libtracemod1-1.so: $(objpfx)libtracemod2-1.so \
++			    $(objpfx)libtracemod3-1.so
++$(objpfx)libtracemod2-1.so: $(objpfx)libtracemod4-1.so \
++			    $(objpfx)libtracemod5-1.so
++
++define libtracemod-x
++$(objpfx)libtracemod$(1)/libtracemod$(1).so: $(objpfx)libtracemod$(1)-1.so
++	$$(make-target-directory)
++	cp $$< $$@
++endef
++libtracemod-suffixes = 1 2 3 4 5
++$(foreach i,$(libtracemod-suffixes), $(eval $(call libtracemod-x,$(i))))
++
++define tst-trace-skeleton
++$(objpfx)tst-trace$(1).out: $(objpfx)libtracemod1/libtracemod1.so \
++			    $(objpfx)libtracemod2/libtracemod2.so \
++			    $(objpfx)libtracemod3/libtracemod3.so \
++			    $(objpfx)libtracemod4/libtracemod4.so \
++			    $(objpfx)libtracemod5/libtracemod5.so \
++			    $(..)scripts/tst-ld-trace.py \
++			    tst-trace$(1).exp
++	${ $(PYTHON) $(..)scripts/tst-ld-trace.py \
++	    "$(test-wrapper-env) $(elf-objpfx)$(rtld-installed-name) \
++	    --library-path $(common-objpfx):$(strip $(2)) \
++	    $(objpfx)libtracemod1/libtracemod1.so" tst-trace$(1).exp \
++	} > $$@; $$(evaluate-test)
++endef
++
++$(eval $(call tst-trace-skeleton,1,))
++$(eval $(call tst-trace-skeleton,2,\
++	$(objpfx)libtracemod2))
++$(eval $(call tst-trace-skeleton,3,\
++	$(objpfx)libtracemod2:$(objpfx)libtracemod3))
++$(eval $(call tst-trace-skeleton,4,\
++	$(objpfx)libtracemod2:$(objpfx)libtracemod3:$(objpfx)libtracemod4))
++$(eval $(call tst-trace-skeleton,5,\
++	$(objpfx)libtracemod2:$(objpfx)libtracemod3:$(objpfx)libtracemod4:$(objpfx)libtracemod5))
++
+ $(objpfx)tst-audit-tlsdesc: $(objpfx)tst-audit-tlsdesc-mod1.so \
+ 			    $(objpfx)tst-audit-tlsdesc-mod2.so \
+ 			    $(shared-thread-library)
+diff --git a/elf/dl-deps.c b/elf/dl-deps.c
+index 9365d54c8e03e5f4..9ff589c8562b2dd1 100644
+--- a/elf/dl-deps.c
++++ b/elf/dl-deps.c
+@@ -489,6 +489,8 @@ _dl_map_object_deps (struct link_map *map,
+ 
+   for (nlist = 0, runp = known; runp; runp = runp->next)
+     {
++      /* _dl_sort_maps ignores l_faked object, so it is safe to not consider
++	 them for nlist.  */
+       if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
+ 	/* This can happen when we trace the loading.  */
+ 	--map->l_searchlist.r_nlist;
+diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
+index 398a08f28c4d9ff1..99354dc08a010dd3 100644
+--- a/elf/dl-sort-maps.c
++++ b/elf/dl-sort-maps.c
+@@ -140,7 +140,9 @@ static void
+ dfs_traversal (struct link_map ***rpo, struct link_map *map,
+ 	       bool *do_reldeps)
+ {
+-  if (map->l_visited)
++  /* _dl_map_object_deps ignores l_faked objects when calculating the
++     number of maps before calling _dl_sort_maps, ignore them as well.  */
++  if (map->l_visited || map->l_faked)
+     return;
+ 
+   map->l_visited = 1;
+diff --git a/elf/libtracemod1-1.c b/elf/libtracemod1-1.c
+new file mode 100644
+index 0000000000000000..7c89c9a5a40b9668
+--- /dev/null
++++ b/elf/libtracemod1-1.c
+@@ -0,0 +1 @@
++/* Empty  */
+diff --git a/elf/libtracemod2-1.c b/elf/libtracemod2-1.c
+new file mode 100644
+index 0000000000000000..7c89c9a5a40b9668
+--- /dev/null
++++ b/elf/libtracemod2-1.c
+@@ -0,0 +1 @@
++/* Empty  */
+diff --git a/elf/libtracemod3-1.c b/elf/libtracemod3-1.c
+new file mode 100644
+index 0000000000000000..7c89c9a5a40b9668
+--- /dev/null
++++ b/elf/libtracemod3-1.c
+@@ -0,0 +1 @@
++/* Empty  */
+diff --git a/elf/libtracemod4-1.c b/elf/libtracemod4-1.c
+new file mode 100644
+index 0000000000000000..7c89c9a5a40b9668
+--- /dev/null
++++ b/elf/libtracemod4-1.c
+@@ -0,0 +1 @@
++/* Empty  */
+diff --git a/elf/libtracemod5-1.c b/elf/libtracemod5-1.c
+new file mode 100644
+index 0000000000000000..7c89c9a5a40b9668
+--- /dev/null
++++ b/elf/libtracemod5-1.c
+@@ -0,0 +1 @@
++/* Empty  */
+diff --git a/elf/tst-trace1.exp b/elf/tst-trace1.exp
+new file mode 100644
+index 0000000000000000..4a6f5211a68fe2c8
+--- /dev/null
++++ b/elf/tst-trace1.exp
+@@ -0,0 +1,4 @@
++ld 1
++libc 1
++libtracemod2.so 0
++libtracemod3.so 0
+diff --git a/elf/tst-trace2.exp b/elf/tst-trace2.exp
+new file mode 100644
+index 0000000000000000..e13506e2eb9aeca2
+--- /dev/null
++++ b/elf/tst-trace2.exp
+@@ -0,0 +1,6 @@
++ld 1
++libc 1
++libtracemod2.so 1
++libtracemod3.so 0
++libtracemod4.so 0
++libtracemod5.so 0
+diff --git a/elf/tst-trace3.exp b/elf/tst-trace3.exp
+new file mode 100644
+index 0000000000000000..e574549d12a53d72
+--- /dev/null
++++ b/elf/tst-trace3.exp
+@@ -0,0 +1,6 @@
++ld 1
++libc 1
++libtracemod2.so 1
++libtracemod3.so 1
++libtracemod4.so 0
++libtracemod5.so 0
+diff --git a/elf/tst-trace4.exp b/elf/tst-trace4.exp
+new file mode 100644
+index 0000000000000000..31ca97b35bde0009
+--- /dev/null
++++ b/elf/tst-trace4.exp
+@@ -0,0 +1,6 @@
++ld 1
++libc 1
++libtracemod2.so 1
++libtracemod3.so 1
++libtracemod4.so 1
++libtracemod5.so 0
+diff --git a/elf/tst-trace5.exp b/elf/tst-trace5.exp
+new file mode 100644
+index 0000000000000000..5d7d95372656396f
+--- /dev/null
++++ b/elf/tst-trace5.exp
+@@ -0,0 +1,6 @@
++ld 1
++libc 1
++libtracemod2.so 1
++libtracemod3.so 1
++libtracemod4.so 1
++libtracemod5.so 1
+diff --git a/scripts/tst-ld-trace.py b/scripts/tst-ld-trace.py
+new file mode 100755
+index 0000000000000000..f5a402800377f44b
+--- /dev/null
++++ b/scripts/tst-ld-trace.py
+@@ -0,0 +1,108 @@
++#!/usr/bin/python3
++# Dump the output of LD_TRACE_LOADED_OBJECTS in architecture neutral format.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# Copyright The GNU Toolchain Authors.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++import argparse
++import os
++import subprocess
++import sys
++
++try:
++    subprocess.run
++except:
++    class _CompletedProcess:
++        def __init__(self, args, returncode, stdout=None, stderr=None):
++            self.args = args
++            self.returncode = returncode
++            self.stdout = stdout
++            self.stderr = stderr
++
++    def _run(*popenargs, input=None, timeout=None, check=False, **kwargs):
++        assert(timeout is None)
++        with subprocess.Popen(*popenargs, **kwargs) as process:
++            try:
++                stdout, stderr = process.communicate(input)
++            except:
++                process.kill()
++                process.wait()
++                raise
++            returncode = process.poll()
++            if check and returncode:
++                raise subprocess.CalledProcessError(returncode, popenargs)
++        return _CompletedProcess(popenargs, returncode, stdout, stderr)
++
++    subprocess.run = _run
++
++def is_vdso(lib):
++    return lib.startswith('linux-gate') or lib.startswith('linux-vdso')
++
++
++def parse_trace(cmd, fref):
++    new_env = os.environ.copy()
++    new_env['LD_TRACE_LOADED_OBJECTS'] = '1'
++    trace_out = subprocess.run(cmd, stdout=subprocess.PIPE, check=True,
++                               universal_newlines=True, env=new_env).stdout
++    trace = []
++    for line in trace_out.splitlines():
++        line = line.strip()
++        if is_vdso(line):
++            continue
++        fields = line.split('=>' if '=>' in line else ' ')
++        lib = os.path.basename(fields[0].strip())
++        if lib.startswith('ld'):
++            lib = 'ld'
++        elif lib.startswith('libc'):
++            lib = 'libc'
++        found = 1 if fields[1].strip() != 'not found' else 0
++        trace += ['{} {}'.format(lib, found)]
++    trace = sorted(trace)
++
++    reference = sorted(line.replace('\n','') for line in fref.readlines())
++
++    ret = 0 if trace == reference else 1
++    if ret != 0:
++        for i in reference:
++            if i not in trace:
++                print("Only in {}: {}".format(fref.name, i))
++        for i in trace:
++            if i not in reference:
++                print("Only in trace: {}".format(i))
++
++    sys.exit(ret)
++
++
++def get_parser():
++    parser = argparse.ArgumentParser(description=__doc__)
++    parser.add_argument('command',
++                        help='command to run')
++    parser.add_argument('reference',
++                        help='reference file to compare')
++    return parser
++
++
++def main(argv):
++    parser = get_parser()
++    opts = parser.parse_args(argv)
++    with open(opts.reference, 'r') as fref:
++        # Remove the initial 'env' command.
++        parse_trace(opts.command.split()[1:], fref)
++
++
++if __name__ == '__main__':
++    main(sys.argv[1:])
diff --git a/SOURCES/glibc-rh1159809-8.patch b/SOURCES/glibc-rh1159809-8.patch
new file mode 100644
index 0000000..4c76782
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-8.patch
@@ -0,0 +1,36 @@
+commit a2211c76c3b994099fd58a06d6072d7495d699cd
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Fri Mar 18 18:18:35 2022 +0100
+
+    scripts/dso-ordering-test.py: Fix C&P error in * callrefs processing
+    
+    The elf/dso-sort-tests-src subdirectory is not changed by this commit,
+    so it seems that the cut-and-paste error was not material.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/scripts/dso-ordering-test.py b/scripts/dso-ordering-test.py
+index bde0406be9da14fc..ee476c810c76f1b0 100644
+--- a/scripts/dso-ordering-test.py
++++ b/scripts/dso-ordering-test.py
+@@ -551,17 +551,17 @@ def process_testcase(t):
+         if obj in t.deps:
+             deps = t.deps[obj]
+             if '*' in deps:
+-                t.deps[obj].remove('*')
++                deps.remove('*')
+                 t.add_deps([obj], non_dep_tgt_objs)
+         if obj in t.callrefs:
+             deps = t.callrefs[obj]
+             if '*' in deps:
+-                t.deps[obj].remove('*')
++                deps.remove('*')
+                 t.add_callrefs([obj], non_dep_tgt_objs)
+     if "#" in t.deps:
+         deps = t.deps["#"]
+         if '*' in deps:
+-            t.deps["#"].remove('*')
++            deps.remove('*')
+             t.add_deps(["#"], non_dep_tgt_objs)
+ 
+     # If no main program was specified in dependency description, make a
diff --git a/SOURCES/glibc-rh1159809-9.patch b/SOURCES/glibc-rh1159809-9.patch
new file mode 100644
index 0000000..b621c3e
--- /dev/null
+++ b/SOURCES/glibc-rh1159809-9.patch
@@ -0,0 +1,37 @@
+commit 183d99737298bb3200f0610fdcd1c7549c8ed560
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Sep 6 07:38:10 2022 +0200
+
+    scripts/dso-ordering-test.py: Generate program run-time dependencies
+    
+    The main program needs to depend on all shared objects, even objects
+    that have link-time dependencies among shared objects.  Filtering
+    out shared objects that already have link-time dependencies is not
+    necessary here; make will do this automatically.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/scripts/dso-ordering-test.py b/scripts/dso-ordering-test.py
+index ee476c810c76f1b0..43b5ec4d920ad6a3 100644
+--- a/scripts/dso-ordering-test.py
++++ b/scripts/dso-ordering-test.py
+@@ -707,13 +707,12 @@ def process_testcase(t):
+                 "\t$(compile.c) $(OUTPUT_OPTION)\n")
+         makefile.write (rule)
+ 
+-        not_depended_objs = find_objs_not_depended_on(test_descr)
+-        if not_depended_objs:
+-            depstr = ""
+-            for dep in not_depended_objs:
+-                depstr += (" $(objpfx)" + test_subdir + "/"
+-                           + test_name + "-" + dep + ".so")
+-            makefile.write("$(objpfx)%s.out:%s\n" % (base_test_name, depstr))
++        # Ensure that all shared objects are built before running the
++        # test, whether there are link-time dependencies or not.
++        depobjs = ["$(objpfx){}/{}-{}.so".format(test_subdir, test_name, dep)
++                   for dep in test_descr.objs]
++        makefile.write("$(objpfx){}.out: {}\n".format(
++            base_test_name, " ".join(depobjs)))
+ 
+         # Add main executable to test-srcs
+         makefile.write("test-srcs += %s/%s\n" % (test_subdir, test_name))
diff --git a/SOURCES/glibc-rh1871383-1.patch b/SOURCES/glibc-rh1871383-1.patch
new file mode 100644
index 0000000..67c88fd
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-1.patch
@@ -0,0 +1,245 @@
+From a1a486d70ebcc47a686ff5846875eacad0940e41 Mon Sep 17 00:00:00 2001
+From: Eyal Itkin <eyalit@checkpoint.com>
+Date: Fri, 20 Mar 2020 21:19:17 +0200
+Subject: Add Safe-Linking to fastbins and tcache
+
+Safe-Linking is a security mechanism that protects single-linked
+lists (such as the fastbin and tcache) from being tampered with by attackers.
+The mechanism makes use of randomness from ASLR (mmap_base), and when
+combined with chunk alignment integrity checks, it protects the "next"
+pointers from being hijacked by an attacker.
+
+While Safe-Unlinking protects double-linked lists (such as the small
+bins), there wasn't any similar protection for attacks against
+single-linked lists. This solution protects against 3 common attacks:
+  * Partial pointer override: modifies the lower bytes (Little Endian)
+  * Full pointer override: hijacks the pointer to an attacker's location
+  * Unaligned chunks: pointing the list to an unaligned address
+
+The design assumes an attacker doesn't know where the heap is located,
+and uses the ASLR randomness to "sign" the single-linked pointers. We
+mark the pointer as P and the location in which it is stored as L, and
+the calculation will be:
+  * PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
+  * *L = PROTECT(P)
+
+This way, the random bits from the address L (which start at the bit
+in the PAGE_SHIFT position), will be merged with LSB of the stored
+protected pointer. This protection layer prevents an attacker from
+modifying the pointer into a controlled value.
+
+An additional check that the chunks are MALLOC_ALIGNed adds an
+important layer:
+  * Attackers can't point to illegal (unaligned) memory addresses
+  * Attackers must guess correctly the alignment bits
+
+On standard 32 bit Linux machines, an attack will directly fail 7
+out of 8 times, and on 64 bit machines it will fail 15 out of 16
+times.
+
+This proposed patch was benchmarked and its effect on the overall
+performance of the heap was negligible and couldn't be distinguished
+from the default variance between tests on the vanilla version. A
+similar protection was added to Chromium's version of TCMalloc
+in 2012, and according to their documentation it had an overhead of
+less than 2%.
+
+Reviewed-by: DJ Delorie <dj@redhat.com>
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+
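+To make the PROTECT(P) := (L >> PAGE_SHIFT) XOR (P) computation above
+concrete, here is a small, self-contained C sketch of the protect/reveal
+round trip, with PAGE_SHIFT assumed to be 12 as in the patch.  The
+demo_protect/demo_reveal helpers are illustrative names only, not glibc's
+PROTECT_PTR/REVEAL_PTR macros, although they perform the same XOR.
+
+    #include <assert.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Mask a pointer with the page-granular bits of the address it is
+       stored at (the "L" in the description above).  */
+    static void *
+    demo_protect (void **location, void *ptr)
+    {
+      return (void *) (((uintptr_t) location >> 12) ^ (uintptr_t) ptr);
+    }
+
+    /* Revealing applies the same XOR with the same storage location.  */
+    static void *
+    demo_reveal (void **location, void *stored)
+    {
+      return (void *) (((uintptr_t) location >> 12) ^ (uintptr_t) stored);
+    }
+
+    int
+    main (void)
+    {
+      int value = 42;
+      void *next = &value;   /* stands in for a fastbin/tcache next pointer  */
+      void *slot;            /* stands in for the list location L            */
+
+      slot = demo_protect (&slot, next);            /* *L = PROTECT(P)  */
+      assert (demo_reveal (&slot, slot) == next);   /* round trip works  */
+
+      printf ("stored %p, revealed %p\n", slot, demo_reveal (&slot, slot));
+      return 0;
+    }
+
+An overwrite of the stored value by an attacker who does not know the heap
+address bits above bit 12 cannot produce a controlled pointer after the
+reveal step, which is the property the commit message relies on.
+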
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index f7cd29bc2f..1282863681 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
+ # define MAX_TCACHE_COUNT UINT16_MAX
+ #endif
+ 
++/* Safe-Linking:
++   Use randomness from ASLR (mmap_base) to protect single-linked lists
++   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
++   lists' chunks, and also perform allocation alignment checks on them.
++   This mechanism reduces the risk of pointer hijacking, as was done with
++   Safe-Unlinking in the double-linked lists of Small-Bins.
++   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
++   larger pages provide less entropy, although the pointer mangling
++   still works.  */
++#define PROTECT_PTR(pos, ptr) \
++  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
++#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)
+ 
+ /*
+   REALLOC_ZERO_BYTES_FREES should be set if a call to
+@@ -2157,12 +2169,15 @@ do_check_malloc_state (mstate av)
+ 
+       while (p != 0)
+         {
++	  if (__glibc_unlikely (!aligned_OK (p)))
++	    malloc_printerr ("do_check_malloc_state(): " \
++			     "unaligned fastbin chunk detected");
+           /* each chunk claims to be inuse */
+           do_check_inuse_chunk (av, p);
+           total += chunksize (p);
+           /* chunk belongs in this bin */
+           assert (fastbin_index (chunksize (p)) == i);
+-          p = p->fd;
++	  p = REVEAL_PTR (p->fd);
+         }
+     }
+ 
+@@ -2923,7 +2938,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
+      detect a double free.  */
+   e->key = tcache;
+ 
+-  e->next = tcache->entries[tc_idx];
++  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
+   tcache->entries[tc_idx] = e;
+   ++(tcache->counts[tc_idx]);
+ }
+@@ -2934,9 +2949,11 @@ static __always_inline void *
+ tcache_get (size_t tc_idx)
+ {
+   tcache_entry *e = tcache->entries[tc_idx];
+-  tcache->entries[tc_idx] = e->next;
++  tcache->entries[tc_idx] = REVEAL_PTR (e->next);
+   --(tcache->counts[tc_idx]);
+   e->key = NULL;
++  if (__glibc_unlikely (!aligned_OK (e)))
++    malloc_printerr ("malloc(): unaligned tcache chunk detected");
+   return (void *) e;
+ }
+ 
+@@ -2960,7 +2977,10 @@ tcache_thread_shutdown (void)
+       while (tcache_tmp->entries[i])
+ 	{
+ 	  tcache_entry *e = tcache_tmp->entries[i];
+-	  tcache_tmp->entries[i] = e->next;
++      if (__glibc_unlikely (!aligned_OK (e)))
++	malloc_printerr ("tcache_thread_shutdown(): " \
++			 "unaligned tcache chunk detected");
++	  tcache_tmp->entries[i] = REVEAL_PTR (e->next);
+ 	  __libc_free (e);
+ 	}
+     }
+@@ -3570,8 +3590,11 @@ _int_malloc (mstate av, size_t bytes)
+       victim = pp;					\
+       if (victim == NULL)				\
+ 	break;						\
++      pp = REVEAL_PTR (victim->fd);                                     \
++      if (__glibc_unlikely (!aligned_OK (pp)))                          \
++	malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
+     }							\
+-  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
++  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
+ 	 != victim);					\
+ 
+   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
+@@ -3583,8 +3606,11 @@ _int_malloc (mstate av, size_t bytes)
+ 
+       if (victim != NULL)
+ 	{
++	  if (__glibc_unlikely (!aligned_OK (victim)))
++	    malloc_printerr ("malloc(): unaligned fastbin chunk detected");
++
+ 	  if (SINGLE_THREAD_P)
+-	    *fb = victim->fd;
++	    *fb = REVEAL_PTR (victim->fd);
+ 	  else
+ 	    REMOVE_FB (fb, pp, victim);
+ 	  if (__glibc_likely (victim != NULL))
+@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes)
+ 		  while (tcache->counts[tc_idx] < mp_.tcache_count
+ 			 && (tc_victim = *fb) != NULL)
+ 		    {
++		      if (__glibc_unlikely (!aligned_OK (tc_victim)))
++			malloc_printerr ("malloc(): unaligned fastbin chunk detected");
+ 		      if (SINGLE_THREAD_P)
+-			*fb = tc_victim->fd;
++			*fb = REVEAL_PTR (tc_victim->fd);
+ 		      else
+ 			{
+ 			  REMOVE_FB (fb, pp, tc_victim);
+@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+ 	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
+ 	    for (tmp = tcache->entries[tc_idx];
+ 		 tmp;
+-		 tmp = tmp->next)
++		 tmp = REVEAL_PTR (tmp->next))
++        {
++	      if (__glibc_unlikely (!aligned_OK (tmp)))
++		malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+ 	      if (tmp == e)
+ 		malloc_printerr ("free(): double free detected in tcache 2");
+ 	    /* If we get here, it was a coincidence.  We've wasted a
+ 	       few cycles, but don't abort.  */
++        }
+ 	  }
+ 
+ 	if (tcache->counts[tc_idx] < mp_.tcache_count)
+@@ -4264,7 +4296,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+ 	   add (i.e., double free).  */
+ 	if (__builtin_expect (old == p, 0))
+ 	  malloc_printerr ("double free or corruption (fasttop)");
+-	p->fd = old;
++	p->fd = PROTECT_PTR (&p->fd, old);
+ 	*fb = p;
+       }
+     else
+@@ -4274,7 +4306,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+ 	     add (i.e., double free).  */
+ 	  if (__builtin_expect (old == p, 0))
+ 	    malloc_printerr ("double free or corruption (fasttop)");
+-	  p->fd = old2 = old;
++	  old2 = old;
++	  p->fd = PROTECT_PTR (&p->fd, old);
+ 	}
+       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+ 	     != old2);
+@@ -4472,13 +4505,17 @@ static void malloc_consolidate(mstate av)
+     if (p != 0) {
+       do {
+ 	{
++	  if (__glibc_unlikely (!aligned_OK (p)))
++	    malloc_printerr ("malloc_consolidate(): " \
++			     "unaligned fastbin chunk detected");
++
+ 	  unsigned int idx = fastbin_index (chunksize (p));
+ 	  if ((&fastbin (av, idx)) != fb)
+ 	    malloc_printerr ("malloc_consolidate(): invalid chunk size");
+ 	}
+ 
+ 	check_inuse_chunk(av, p);
+-	nextp = p->fd;
++	nextp = REVEAL_PTR (p->fd);
+ 
+ 	/* Slightly streamlined version of consolidation code in free() */
+ 	size = chunksize (p);
+@@ -4896,8 +4933,13 @@ int_mallinfo (mstate av, struct mallinfo *m)
+ 
+   for (i = 0; i < NFASTBINS; ++i)
+     {
+-      for (p = fastbin (av, i); p != 0; p = p->fd)
++      for (p = fastbin (av, i);
++	   p != 0;
++	   p = REVEAL_PTR (p->fd))
+         {
++	  if (__glibc_unlikely (!aligned_OK (p)))
++	    malloc_printerr ("int_mallinfo(): " \
++			     "unaligned fastbin chunk detected");
+           ++nfastblocks;
+           fastavail += chunksize (p);
+         }
+@@ -5437,8 +5479,11 @@ __malloc_info (int options, FILE *fp)
+ 
+ 	      while (p != NULL)
+ 		{
++		  if (__glibc_unlikely (!aligned_OK (p)))
++		    malloc_printerr ("__malloc_info(): " \
++				     "unaligned fastbin chunk detected");
+ 		  ++nthissize;
+-		  p = p->fd;
++		  p = REVEAL_PTR (p->fd);
+ 		}
+ 
+ 	      fastavail += nthissize * thissize;
diff --git a/SOURCES/glibc-rh1871383-2.patch b/SOURCES/glibc-rh1871383-2.patch
new file mode 100644
index 0000000..0313dbb
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-2.patch
@@ -0,0 +1,87 @@
+From 768358b6a80742f6be68ecd9f952f4b60614df96 Mon Sep 17 00:00:00 2001
+From: Eyal Itkin <eyalit@checkpoint.com>
+Date: Tue, 31 Mar 2020 01:55:13 -0400
+Subject: Typo fixes and CR cleanup in Safe-Linking
+
+Removed unneeded '\' chars from end of lines and fixed some
+indentation issues that were introduced in the original
+Safe-Linking patch.
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 1282863681..0e4acb22f6 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -2170,7 +2170,7 @@ do_check_malloc_state (mstate av)
+       while (p != 0)
+         {
+ 	  if (__glibc_unlikely (!aligned_OK (p)))
+-	    malloc_printerr ("do_check_malloc_state(): " \
++	    malloc_printerr ("do_check_malloc_state(): "
+ 			     "unaligned fastbin chunk detected");
+           /* each chunk claims to be inuse */
+           do_check_inuse_chunk (av, p);
+@@ -2977,9 +2977,9 @@ tcache_thread_shutdown (void)
+       while (tcache_tmp->entries[i])
+ 	{
+ 	  tcache_entry *e = tcache_tmp->entries[i];
+-      if (__glibc_unlikely (!aligned_OK (e)))
+-	malloc_printerr ("tcache_thread_shutdown(): " \
+-			 "unaligned tcache chunk detected");
++	  if (__glibc_unlikely (!aligned_OK (e)))
++	    malloc_printerr ("tcache_thread_shutdown(): "
++			     "unaligned tcache chunk detected");
+ 	  tcache_tmp->entries[i] = REVEAL_PTR (e->next);
+ 	  __libc_free (e);
+ 	}
+@@ -4225,14 +4225,14 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+ 	    for (tmp = tcache->entries[tc_idx];
+ 		 tmp;
+ 		 tmp = REVEAL_PTR (tmp->next))
+-        {
+-	      if (__glibc_unlikely (!aligned_OK (tmp)))
+-		malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+-	      if (tmp == e)
+-		malloc_printerr ("free(): double free detected in tcache 2");
+-	    /* If we get here, it was a coincidence.  We've wasted a
+-	       few cycles, but don't abort.  */
+-        }
++	      {
++		if (__glibc_unlikely (!aligned_OK (tmp)))
++		  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
++		if (tmp == e)
++		  malloc_printerr ("free(): double free detected in tcache 2");
++		/* If we get here, it was a coincidence.  We've wasted a
++		   few cycles, but don't abort.  */
++	      }
+ 	  }
+ 
+ 	if (tcache->counts[tc_idx] < mp_.tcache_count)
+@@ -4506,7 +4506,7 @@ static void malloc_consolidate(mstate av)
+       do {
+ 	{
+ 	  if (__glibc_unlikely (!aligned_OK (p)))
+-	    malloc_printerr ("malloc_consolidate(): " \
++	    malloc_printerr ("malloc_consolidate(): "
+ 			     "unaligned fastbin chunk detected");
+ 
+ 	  unsigned int idx = fastbin_index (chunksize (p));
+@@ -4938,7 +4938,7 @@ int_mallinfo (mstate av, struct mallinfo *m)
+ 	   p = REVEAL_PTR (p->fd))
+         {
+ 	  if (__glibc_unlikely (!aligned_OK (p)))
+-	    malloc_printerr ("int_mallinfo(): " \
++	    malloc_printerr ("int_mallinfo(): "
+ 			     "unaligned fastbin chunk detected");
+           ++nfastblocks;
+           fastavail += chunksize (p);
+@@ -5480,7 +5480,7 @@ __malloc_info (int options, FILE *fp)
+ 	      while (p != NULL)
+ 		{
+ 		  if (__glibc_unlikely (!aligned_OK (p)))
+-		    malloc_printerr ("__malloc_info(): " \
++		    malloc_printerr ("__malloc_info(): "
+ 				     "unaligned fastbin chunk detected");
+ 		  ++nthissize;
+ 		  p = REVEAL_PTR (p->fd);
diff --git a/SOURCES/glibc-rh1871383-3.patch b/SOURCES/glibc-rh1871383-3.patch
new file mode 100644
index 0000000..e5a18e8
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-3.patch
@@ -0,0 +1,100 @@
+From 49c3c37651e2d2ec4ff8ce21252bbbc08a9d6639 Mon Sep 17 00:00:00 2001
+From: Eyal Itkin <eyalit@checkpoint.com>
+Date: Tue, 31 Mar 2020 02:00:14 -0400
+Subject: Fix alignment bug in Safe-Linking
+
+Alignment checks should be performed on the user's buffer and NOT
+on the mchunkptr as was done before. This caused bugs in 32 bit
+versions, because: 2*sizeof(t) != MALLOC_ALIGNMENT.
+
+As the tcache works on users' buffers, it uses the aligned_OK()
+check; the rest work on the mchunkptr and therefore check using
+misaligned_chunk().
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
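A rough numeric sketch (not part of the patch) of why the distinction matters; the SIZE_SZ and MALLOC_ALIGNMENT values below are assumed i386-style numbers, and the addresses are made up:

    /* When MALLOC_ALIGNMENT != 2 * SIZE_SZ (32-bit targets), the user
       buffer is MALLOC_ALIGNED but the chunk header in front of it is not,
       so applying the user-buffer alignment test to the mchunkptr would
       reject perfectly valid chunks.  */
    #include <stdio.h>
    #include <stdint.h>

    #define SIZE_SZ           4u    /* assumed 32-bit size field */
    #define MALLOC_ALIGNMENT  16u   /* assumed i386-style alignment */
    #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
    #define CHUNK_HDR_SZ      (2 * SIZE_SZ)

    int
    main (void)
    {
      uintptr_t mem = 0x100010;              /* hypothetical aligned user pointer */
      uintptr_t chunk = mem - CHUNK_HDR_SZ;  /* header sits just before it */

      printf ("user buffer %% %u = %lu (aligned)\n", MALLOC_ALIGNMENT,
              (unsigned long) (mem & MALLOC_ALIGN_MASK));
      printf ("mchunkptr   %% %u = %lu (same test would falsely fail)\n",
              MALLOC_ALIGNMENT, (unsigned long) (chunk & MALLOC_ALIGN_MASK));
      return 0;
    }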
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 0e4acb22f6..6acb5ad43a 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -2169,7 +2169,7 @@ do_check_malloc_state (mstate av)
+ 
+       while (p != 0)
+         {
+-	  if (__glibc_unlikely (!aligned_OK (p)))
++	  if (__glibc_unlikely (misaligned_chunk (p)))
+ 	    malloc_printerr ("do_check_malloc_state(): "
+ 			     "unaligned fastbin chunk detected");
+           /* each chunk claims to be inuse */
+@@ -2949,11 +2949,11 @@ static __always_inline void *
+ tcache_get (size_t tc_idx)
+ {
+   tcache_entry *e = tcache->entries[tc_idx];
++  if (__glibc_unlikely (!aligned_OK (e)))
++    malloc_printerr ("malloc(): unaligned tcache chunk detected");
+   tcache->entries[tc_idx] = REVEAL_PTR (e->next);
+   --(tcache->counts[tc_idx]);
+   e->key = NULL;
+-  if (__glibc_unlikely (!aligned_OK (e)))
+-    malloc_printerr ("malloc(): unaligned tcache chunk detected");
+   return (void *) e;
+ }
+ 
+@@ -3591,7 +3591,7 @@ _int_malloc (mstate av, size_t bytes)
+       if (victim == NULL)				\
+ 	break;						\
+       pp = REVEAL_PTR (victim->fd);                                     \
+-      if (__glibc_unlikely (!aligned_OK (pp)))                          \
++      if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp)))       \
+ 	malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
+     }							\
+   while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
+@@ -3606,8 +3606,8 @@ _int_malloc (mstate av, size_t bytes)
+ 
+       if (victim != NULL)
+ 	{
+-	  if (__glibc_unlikely (!aligned_OK (victim)))
+-	    malloc_printerr ("malloc(): unaligned fastbin chunk detected");
++	  if (__glibc_unlikely (misaligned_chunk (victim)))
++	    malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
+ 
+ 	  if (SINGLE_THREAD_P)
+ 	    *fb = REVEAL_PTR (victim->fd);
+@@ -3631,8 +3631,8 @@ _int_malloc (mstate av, size_t bytes)
+ 		  while (tcache->counts[tc_idx] < mp_.tcache_count
+ 			 && (tc_victim = *fb) != NULL)
+ 		    {
+-		      if (__glibc_unlikely (!aligned_OK (tc_victim)))
+-			malloc_printerr ("malloc(): unaligned fastbin chunk detected");
++		      if (__glibc_unlikely (misaligned_chunk (tc_victim)))
++			malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
+ 		      if (SINGLE_THREAD_P)
+ 			*fb = REVEAL_PTR (tc_victim->fd);
+ 		      else
+@@ -4505,7 +4505,7 @@ static void malloc_consolidate(mstate av)
+     if (p != 0) {
+       do {
+ 	{
+-	  if (__glibc_unlikely (!aligned_OK (p)))
++	  if (__glibc_unlikely (misaligned_chunk (p)))
+ 	    malloc_printerr ("malloc_consolidate(): "
+ 			     "unaligned fastbin chunk detected");
+ 
+@@ -4937,7 +4937,7 @@ int_mallinfo (mstate av, struct mallinfo *m)
+ 	   p != 0;
+ 	   p = REVEAL_PTR (p->fd))
+         {
+-	  if (__glibc_unlikely (!aligned_OK (p)))
++	  if (__glibc_unlikely (misaligned_chunk (p)))
+ 	    malloc_printerr ("int_mallinfo(): "
+ 			     "unaligned fastbin chunk detected");
+           ++nfastblocks;
+@@ -5479,7 +5479,7 @@ __malloc_info (int options, FILE *fp)
+ 
+ 	      while (p != NULL)
+ 		{
+-		  if (__glibc_unlikely (!aligned_OK (p)))
++		  if (__glibc_unlikely (misaligned_chunk (p)))
+ 		    malloc_printerr ("__malloc_info(): "
+ 				     "unaligned fastbin chunk detected");
+ 		  ++nthissize;
diff --git a/SOURCES/glibc-rh1871383-4.patch b/SOURCES/glibc-rh1871383-4.patch
new file mode 100644
index 0000000..cac8349
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-4.patch
@@ -0,0 +1,215 @@
+From 6310d570bf20348135d09e1f9de84a9ae7d06f83 Mon Sep 17 00:00:00 2001
+From: Eyal Itkin <eyalit@checkpoint.com>
+Date: Thu, 2 Apr 2020 07:26:35 -0400
+Subject: Add tests for Safe-Linking
+
+Adding the test "tst-safe-linking" for testing that Safe-Linking works
+as expected. The test checks these 3 main flows:
+ * tcache protection
+ * fastbin protection
+ * malloc_consolidate() correctness
+
+As there is a random chance of 1/16 that the alignment will remain
+correct, the test checks each flow up to 10 times, using different random
+values for the pointer corruption. As a result, the chance for a false
+failure of a given tested flow is 2**(-40), thus highly unlikely.
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
+diff --git a/malloc/Makefile b/malloc/Makefile
+index 984045b5b9..e22cbde22d 100644
+--- a/malloc/Makefile
++++ b/malloc/Makefile
+@@ -39,6 +39,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
+ 	 tst-malloc-too-large \
+ 	 tst-malloc-stats-cancellation \
+ 	 tst-tcfree1 tst-tcfree2 tst-tcfree3 \
++	 tst-safe-linking \
+ 
+ tests-static := \
+ 	 tst-interpose-static-nothread \
+diff --git a/malloc/tst-safe-linking.c b/malloc/tst-safe-linking.c
+new file mode 100644
+index 0000000000..067b6c09cf
+--- /dev/null
++++ b/malloc/tst-safe-linking.c
+@@ -0,0 +1,179 @@
++/* Test reporting of Safe-Linking caught errors.
++   Copyright (C) 2020 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <signal.h>
++#include <stdint.h>
++#include <stdlib.h>
++#include <memory.h>
++#include <string.h>
++#include <time.h>
++#include <stdbool.h>
++#include <support/capture_subprocess.h>
++#include <support/check.h>
++
++/* Run CALLBACK and check that the data on standard error equals
++   EXPECTED.  */
++static void
++check (const char *test, void (*callback) (void *),
++       const char *expected)
++{
++  int i, rand_mask;
++  bool success = false;
++  /* There is a chance of 1/16 that a corrupted pointer will be aligned.
++     Try multiple times so that statistical failure will be improbable.  */
++  for (i = 0; i < 10 && !success; ++i)
++    {
++      rand_mask = rand () & 0xFF;
++      struct support_capture_subprocess result
++	= support_capture_subprocess (callback, &rand_mask);
++      /* Did not crash, could happen.  Try again.  */
++      if (strlen (result.err.buffer) == 0)
++	continue;
++      /* Crashed, must be the expected result.  */
++      if (strcmp (result.err.buffer, expected) != 0)
++	{
++	  support_record_failure ();
++	  printf ("error: test %s unexpected standard error data\n"
++	          "  expected: %s\n"
++	          "  actual:   %s\n",
++	          test, expected, result.err.buffer);
++	}
++      TEST_VERIFY (WIFSIGNALED (result.status));
++      if (WIFSIGNALED (result.status))
++	TEST_VERIFY (WTERMSIG (result.status) == SIGABRT);
++      support_capture_subprocess_free (&result);
++      success = true;
++    }
++  TEST_VERIFY (success);
++}
++
++/* Implementation details must be kept in sync with malloc.  */
++#define TCACHE_FILL_COUNT               7
++#define TCACHE_ALLOC_SIZE               0x20
++#define MALLOC_CONSOLIDATE_SIZE         256*1024
++
++/* Try corrupting the tcache list.  */
++static void
++test_tcache (void *closure)
++{
++  int mask = ((int *)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Populate the tcache list.  */
++  void * volatile a = malloc (size);
++  void * volatile b = malloc (size);
++  void * volatile c = malloc (size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  c = malloc (size);
++  /* This line will trigger the Safe-Linking check.  */
++  b = malloc (size);
++  printf ("b=%p\n", b);
++}
++
++/* Try corrupting the fastbin list.  */
++static void
++test_fastbin (void *closure)
++{
++  int i;
++  int mask = ((int *)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Take the tcache out of the game.  */
++  for (i = 0; i < TCACHE_FILL_COUNT; ++i)
++    {
++      void * volatile p = calloc (1, size);
++      free (p);
++    }
++
++  /* Populate the fastbin list.  */
++  void * volatile a = calloc (1, size);
++  void * volatile b = calloc (1, size);
++  void * volatile c = calloc (1, size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  c = calloc (1, size);
++  /* This line will trigger the Safe-Linking check.  */
++  b = calloc (1, size);
++  printf ("b=%p\n", b);
++}
++
++/* Try corrupting the fastbin list and trigger a consolidate.  */
++static void
++test_fastbin_consolidate (void *closure)
++{
++  int i;
++  int mask = ((int*)closure)[0];
++  size_t size = TCACHE_ALLOC_SIZE;
++
++  /* Take the tcache out of the game.  */
++  for (i = 0; i < TCACHE_FILL_COUNT; ++i)
++    {
++      void * volatile p = calloc (1, size);
++      free (p);
++    }
++
++  /* Populate the fastbin list.  */
++  void * volatile a = calloc (1, size);
++  void * volatile b = calloc (1, size);
++  void * volatile c = calloc (1, size);
++  free (a);
++  free (b);
++  free (c);
++
++  /* Corrupt the pointer with a random value, and avoid optimizations.  */
++  printf ("Before: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++  memset (c, mask & 0xFF, size);
++  printf ("After: c=%p, c[0]=%p\n", c, ((void **)c)[0]);
++
++  /* This line will trigger the Safe-Linking check.  */
++  b = malloc (MALLOC_CONSOLIDATE_SIZE);
++  printf ("b=%p\n", b);
++}
++
++static int
++do_test (void)
++{
++  /* Seed the random for the test.  */
++  srand (time (NULL));
++
++  check ("test_tcache", test_tcache,
++         "malloc(): unaligned tcache chunk detected\n");
++  check ("test_fastbin", test_fastbin,
++         "malloc(): unaligned fastbin chunk detected 2\n");
++  check ("test_fastbin_consolidate", test_fastbin_consolidate,
++         "malloc_consolidate(): unaligned fastbin chunk detected\n");
++
++  return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/SOURCES/glibc-rh1871383-5.patch b/SOURCES/glibc-rh1871383-5.patch
new file mode 100644
index 0000000..bc51a1e
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-5.patch
@@ -0,0 +1,35 @@
+From b9cde4e3aa1ff338da7064daf1386b2f4a7351ba Mon Sep 17 00:00:00 2001
+From: DJ Delorie <dj@redhat.com>
+Date: Sat, 4 Apr 2020 01:44:56 -0400
+Subject: malloc: ensure set_max_fast never stores zero [BZ #25733]
+
+The code for set_max_fast() stores an "impossibly small value"
+instead of zero, when the parameter is zero.  However, for
+small values of the parameter (ex: 1 or 2) the computation
+results in a zero being stored anyway.
+
+This patch checks for the parameter being small enough for the
+computation to result in zero instead, so that a zero is never
+stored.
+
+key values which result in zero being stored:
+
+x86-64:  1..7  (or other 64-bit)
+i686:    1..11
+armhfp:  1..3  (or other 32-bit)
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
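A small arithmetic sketch (not part of the patch) of the failure mode on a 64-bit layout; SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 are assumed, matching the "x86-64: 1..7" row above:

    /* The pre-patch formula (s + SIZE_SZ) & ~MALLOC_ALIGN_MASK rounds small
       requests down to zero; the patch instead routes any s small enough to
       hit zero (s <= MALLOC_ALIGN_MASK - SIZE_SZ) to MIN_CHUNK_SIZE / 2.  */
    #include <stdio.h>
    #include <stddef.h>

    #define SIZE_SZ           8
    #define MALLOC_ALIGN_MASK 15

    int
    main (void)
    {
      for (size_t s = 1; s <= 9; s++)
        printf ("set_max_fast (%zu): old formula stores %zu\n",
                s, (s + SIZE_SZ) & ~(size_t) MALLOC_ALIGN_MASK);
      /* s = 1..7 all print 0; s = 8 and 9 print 16.  */
      return 0;
    }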
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 6acb5ad43a..ee87ddbbf9 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -1632,7 +1632,7 @@ static INTERNAL_SIZE_T global_max_fast;
+  */
+ 
+ #define set_max_fast(s) \
+-  global_max_fast = (((s) == 0)						      \
++  global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ)	\
+                      ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+ 
+ static inline INTERNAL_SIZE_T
diff --git a/SOURCES/glibc-rh1871383-6.patch b/SOURCES/glibc-rh1871383-6.patch
new file mode 100644
index 0000000..b21971a
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-6.patch
@@ -0,0 +1,35 @@
+From 0e00b35704e67c499c3abfbd5b6224a13d38b012 Mon Sep 17 00:00:00 2001
+From: "W. Hashimoto" <ssmallkirby@gmail.com>
+Date: Fri, 11 Dec 2020 16:59:10 -0500
+Subject: malloc: Detect infinite-loop in _int_free when freeing tcache
+ [BZ#27052]
+
+If the linked list of the tcache contains a loop, it causes an
+infinite loop in _int_free when freeing into the tcache. The PoC
+that triggers such an infinite loop is in Bugzilla (#27052). The
+traversal should terminate when it exceeds mp_.tcache_count
+entries, and the program should abort. The affected glibc
+versions are 2.29 and later.
+
+Reviewed-by: DJ Delorie <dj@redhat.com>
+
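The bounded-traversal idea in isolation, as a standalone C sketch (not part of the patch); the names find_bounded and limit are illustrative, while the real code bounds the walk by mp_.tcache_count and aborts via malloc_printerr:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { struct entry *next; };

    /* Return nonzero if TARGET occurs in LIST, giving up (and treating the
       list as corrupted) once LIMIT nodes have been visited.  */
    static int
    find_bounded (struct entry *list, struct entry *target, size_t limit)
    {
      size_t cnt = 0;
      for (struct entry *tmp = list; tmp != NULL; tmp = tmp->next, ++cnt)
        {
          if (cnt >= limit)
            {
              fprintf (stderr, "too many chunks detected in tcache\n");
              abort ();
            }
          if (tmp == target)
            return 1;
        }
      return 0;
    }

    int
    main (void)
    {
      struct entry a, b;
      a.next = &b;
      b.next = &a;                        /* corrupted list: a cycle */
      return find_bounded (&a, NULL, 7);  /* aborts instead of spinning */
    }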
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 5b87bdb081..ec2d934595 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -4224,11 +4224,14 @@ _int_free (mstate av, mchunkptr p, int have_lock)
+ 	if (__glibc_unlikely (e->key == tcache))
+ 	  {
+ 	    tcache_entry *tmp;
++	    size_t cnt = 0;
+ 	    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
+ 	    for (tmp = tcache->entries[tc_idx];
+ 		 tmp;
+-		 tmp = REVEAL_PTR (tmp->next))
++		 tmp = REVEAL_PTR (tmp->next), ++cnt)
+ 	      {
++		if (cnt >= mp_.tcache_count)
++		  malloc_printerr ("free(): too many chunks detected in tcache");
+ 		if (__glibc_unlikely (!aligned_OK (tmp)))
+ 		  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+ 		if (tmp == e)
diff --git a/SOURCES/glibc-rh1871383-7.patch b/SOURCES/glibc-rh1871383-7.patch
new file mode 100644
index 0000000..61d11c4
--- /dev/null
+++ b/SOURCES/glibc-rh1871383-7.patch
@@ -0,0 +1,133 @@
+From fc859c304898a5ec72e0ba5269ed136ed0ea10e1 Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Date: Wed, 7 Jul 2021 23:02:46 +0530
+Subject: Harden tcache double-free check
+
+The tcache allocator layer uses the tcache pointer as a key to
+identify a block that may be freed twice.  Since this is in the
+application data area, an attacker exploiting a use-after-free could
+potentially get access to the entire tcache structure through this
+key.  A detailed write-up was provided by Awarau here:
+
+https://awaraucom.wordpress.com/2020/07/19/house-of-io-remastered/
+
+Replace this static pointer use for key checking with one that is
+generated at malloc initialization.  The first attempt is through
+getrandom with a fallback to random_bits(), which is a simple
+pseudo-random number generator based on the clock.  The fallback ought
+to be sufficient since the goal of the randomness is only to make the
+key arbitrary enough that it is very unlikely to collide with user
+data.
+
+Co-authored-by: Eyal Itkin <eyalit@checkpoint.com>
+
+[note: context for arena.c chunk #2 changed to accommodate missing
+tagging support code - DJ]
+
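A standalone sketch (not part of the patch) of the key-initialization strategy; glibc uses its internal __getrandom and random_bits(), so this version substitutes the public getrandom(2) and a clock-based fallback purely for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>
    #include <sys/random.h>

    static uintptr_t tcache_key;

    static void
    tcache_key_initialize (void)
    {
      if (getrandom (&tcache_key, sizeof (tcache_key), GRND_NONBLOCK)
          != sizeof (tcache_key))
        {
          /* Fallback: the key only needs to be unlikely to collide with
             application data, not cryptographically strong.  */
          struct timespec ts;
          clock_gettime (CLOCK_MONOTONIC, &ts);
          tcache_key = (uintptr_t) ts.tv_sec ^ ((uintptr_t) ts.tv_nsec << 16);
        }
    }

    int
    main (void)
    {
      tcache_key_initialize ();
      printf ("tcache_key = %#lx\n", (unsigned long) tcache_key);
      return 0;
    }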
+diff -rup a/malloc/arena.c b/malloc/arena.c
+--- a/malloc/arena.c	2022-09-16 01:09:02.003843024 -0400
++++ b/malloc/arena.c	2022-09-16 01:25:51.879994057 -0400
+@@ -286,6 +286,10 @@ extern struct dl_open_hook *_dl_open_hoo
+ libc_hidden_proto (_dl_open_hook);
+ #endif
+ 
++#if USE_TCACHE
++static void tcache_key_initialize (void);
++#endif
++
+ static void
+ ptmalloc_init (void)
+ {
+@@ -294,6 +298,10 @@ ptmalloc_init (void)
+ 
+   __malloc_initialized = 0;
+ 
++#if USE_TCACHE
++  tcache_key_initialize ();
++#endif
++
+ #ifdef SHARED
+   /* In case this libc copy is in a non-default namespace, never use brk.
+      Likewise if dlopened from statically linked program.  */
+diff -rup a/malloc/malloc.c b/malloc/malloc.c
+--- a/malloc/malloc.c	2022-09-16 01:09:05.491977387 -0400
++++ b/malloc/malloc.c	2022-09-16 01:25:51.883994213 -0400
+@@ -247,6 +247,10 @@
+ /* For SINGLE_THREAD_P.  */
+ #include <sysdep-cancel.h>
+ 
++/* For tcache double-free check.  */
++#include <random-bits.h>
++#include <sys/random.h>
++
+ /*
+   Debugging:
+ 
+@@ -2924,7 +2928,7 @@ typedef struct tcache_entry
+ {
+   struct tcache_entry *next;
+   /* This field exists to detect double frees.  */
+-  struct tcache_perthread_struct *key;
++  uintptr_t key;
+ } tcache_entry;
+ 
+ /* There is one of these for each thread, which contains the
+@@ -2941,6 +2945,31 @@ typedef struct tcache_perthread_struct
+ static __thread bool tcache_shutting_down = false;
+ static __thread tcache_perthread_struct *tcache = NULL;
+ 
++/* Process-wide key to try and catch a double-free in the same thread.  */
++static uintptr_t tcache_key;
++
++/* The value of tcache_key does not really have to be a cryptographically
++   secure random number.  It only needs to be arbitrary enough so that it does
++   not collide with values present in applications.  If a collision does happen
++   consistently enough, it could cause a degradation in performance since the
++   entire list is checked to check if the block indeed has been freed the
++   second time.  The odds of this happening are exceedingly low though, about 1
++   in 2^wordsize.  There is probably a higher chance of the performance
++   degradation being due to a double free where the first free happened in a
++   different thread; that's a case this check does not cover.  */
++static void
++tcache_key_initialize (void)
++{
++  if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
++      != sizeof (tcache_key))
++    {
++      tcache_key = random_bits ();
++#if __WORDSIZE == 64
++      tcache_key = (tcache_key << 32) | random_bits ();
++#endif
++    }
++}
++
+ /* Caller must ensure that we know tc_idx is valid and there's room
+    for more chunks.  */
+ static __always_inline void
+@@ -2950,7 +2979,7 @@ tcache_put (mchunkptr chunk, size_t tc_i
+ 
+   /* Mark this chunk as "in the tcache" so the test in _int_free will
+      detect a double free.  */
+-  e->key = tcache;
++  e->key = tcache_key;
+ 
+   e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
+   tcache->entries[tc_idx] = e;
+@@ -2967,7 +2996,7 @@ tcache_get (size_t tc_idx)
+     malloc_printerr ("malloc(): unaligned tcache chunk detected");
+   tcache->entries[tc_idx] = REVEAL_PTR (e->next);
+   --(tcache->counts[tc_idx]);
+-  e->key = NULL;
++  e->key = 0;
+   return (void *) e;
+ }
+ 
+@@ -4231,7 +4260,7 @@ _int_free (mstate av, mchunkptr p, int h
+ 	   trust it (it also matches random payload data at a 1 in
+ 	   2^<size_t> chance), so verify it's not an unlikely
+ 	   coincidence before aborting.  */
+-	if (__glibc_unlikely (e->key == tcache))
++	if (__glibc_unlikely (e->key == tcache_key))
+ 	  {
+ 	    tcache_entry *tmp;
+ 	    size_t cnt = 0;
diff --git a/SOURCES/glibc-rh2109510-1.patch b/SOURCES/glibc-rh2109510-1.patch
new file mode 100644
index 0000000..52b069e
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-1.patch
@@ -0,0 +1,27 @@
+commit 97f8225d22ef727ae9935cc231643efdc430d530
+Author: Zack Weinberg <zackw@panix.com>
+Date:   Thu Mar 14 09:44:22 2019 -0400
+
+    scripts/check-obsolete-constructs.py: Process all headers as UTF-8.
+    
+    A few of our installed headers contain UTF-8 in comments.
+    check-obsolete-constructs opened files without explicitly specifying
+    their encoding, so it would barf on these headers if “make check” was
+    run in a non-UTF-8 locale.
+    
+            * scripts/check-obsolete-constructs.py (HeaderChecker.check):
+            Specify encoding="utf-8" when opening headers to check.
+
+diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
+index ce5c72251f4d7cc0..89d21dea6e788783 100755
+--- a/scripts/check-obsolete-constructs.py
++++ b/scripts/check-obsolete-constructs.py
+@@ -437,7 +437,7 @@ class HeaderChecker:
+     def check(self, fname):
+         self.fname = fname
+         try:
+-            with open(fname, "rt") as fp:
++            with open(fname, "rt", encoding="utf-8") as fp:
+                 contents = fp.read()
+         except OSError as e:
+             sys.stderr.write("{}: {}\n".format(fname, e.strerror))
diff --git a/SOURCES/glibc-rh2109510-10.patch b/SOURCES/glibc-rh2109510-10.patch
new file mode 100644
index 0000000..31291df
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-10.patch
@@ -0,0 +1,1449 @@
+commit 30035d67728a846fa39749cd162afd278ac654c4
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Apr 11 11:28:08 2022 +0200
+
+    scripts: Add glibcelf.py module
+    
+    Hopefully, this will lead to tests that are easier to maintain.  The
+    current approach of parsing readelf -W output using regular expressions
+    is not necessarily easier than parsing the ELF data directly.
+    
+    This module is still somewhat incomplete (e.g., coverage of relocation
+    types and versioning information is missing), but it is sufficient to
+    perform basic symbol analysis or program header analysis.
+    
+    The EM_* mapping for architecture-specific constant classes (e.g.,
+    SttX86_64) is not yet implemented.  The classes are defined for the
+    benefit of elf/tst-glibcelf.py.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	elf/Makefile
+	  (prelink removal upstream)
+
+diff --git a/elf/Makefile b/elf/Makefile
+index 44966b9dfef15463..89ce4f5196e5eb39 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -967,6 +967,13 @@ tests-special += $(objpfx)tst-prelink-cmp.out
+ endif
+ endif
+ 
++tests-special += $(objpfx)tst-glibcelf.out
++$(objpfx)tst-glibcelf.out: tst-glibcelf.py elf.h $(..)/scripts/glibcelf.py \
++  $(..)/scripts/glibcextract.py
++	PYTHONPATH=$(..)scripts $(PYTHON) tst-glibcelf.py \
++          --cc="$(CC) $(patsubst -DMODULE_NAME=%,-DMODULE_NAME=testsuite,$(CPPFLAGS))" \
++	  < /dev/null > $@ 2>&1; $(evaluate-test)
++
+ # The test requires shared _and_ PIE because the executable
+ # unit test driver must be able to link with the shared object
+ # that is going to eventually go into an installed DSO.
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+new file mode 100644
+index 0000000000000000..bf15a3bad4479e08
+--- /dev/null
++++ b/elf/tst-glibcelf.py
+@@ -0,0 +1,260 @@
++#!/usr/bin/python3
++# Verify scripts/glibcelf.py contents against elf/elf.h.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++import argparse
++import enum
++import sys
++
++import glibcelf
++import glibcextract
++
++errors_encountered = 0
++
++def error(message):
++    global errors_encountered
++    sys.stdout.write('error: {}\n'.format(message))
++    errors_encountered += 1
++
++# The enum constants in glibcelf are expected to have exactly these
++# prefixes.
++expected_constant_prefixes = tuple(
++    'ELFCLASS ELFDATA EM_ ET_ DT_ PF_ PT_ SHF_ SHN_ SHT_ STB_ STT_'.split())
++
++def find_constant_prefix(name):
++    """Returns a matching prefix from expected_constant_prefixes or None."""
++    for prefix in expected_constant_prefixes:
++        if name.startswith(prefix):
++            return prefix
++    return None
++
++def find_enum_types():
++    """A generator for OpenIntEnum and IntFlag classes in glibcelf."""
++    for obj in vars(glibcelf).values():
++        if isinstance(obj, type) and obj.__bases__[0] in (
++                glibcelf._OpenIntEnum, enum.Enum, enum.IntFlag):
++            yield obj
++
++def check_duplicates():
++    """Verifies that enum types do not have duplicate values.
++
++    Different types must have different member names, too.
++
++    """
++    global_seen = {}
++    for typ in find_enum_types():
++        seen = {}
++        last = None
++        for (name, e) in typ.__members__.items():
++            if e.value in seen:
++                error('{} has {}={} and {}={}'.format(
++                    typ, seen[e.value], e.value, name, e.value))
++                last = e
++            else:
++                seen[e.value] = name
++                if last is not None and last.value > e.value:
++                    error('{} has {}={} after {}={}'.format(
++                        typ, name, e.value, last.name, last.value))
++                if name in global_seen:
++                    error('{} used in {} and {}'.format(
++                        name, global_seen[name], typ))
++                else:
++                    global_seen[name] = typ
++
++def check_constant_prefixes():
++    """Check that the constant prefixes match expected_constant_prefixes."""
++    seen = set()
++    for typ in find_enum_types():
++        typ_prefix = None
++        for val in typ:
++            prefix = find_constant_prefix(val.name)
++            if prefix is None:
++                error('constant {!r} for {} has unknown prefix'.format(
++                    val, typ))
++                break
++            elif typ_prefix is None:
++                typ_prefix = prefix
++                seen.add(typ_prefix)
++            elif prefix != typ_prefix:
++                error('prefix {!r} for constant {!r}, expected {!r}'.format(
++                    prefix, val, typ_prefix))
++        if typ_prefix is None:
++            error('empty enum type {}'.format(typ))
++
++    for prefix in sorted(set(expected_constant_prefixes) - seen):
++        error('missing constant prefix {!r}'.format(prefix))
++    # Reverse difference is already covered inside the loop.
++
++def find_elf_h_constants(cc):
++    """Returns a dictionary of relevant constants from <elf.h>."""
++    return glibcextract.compute_macro_consts(
++        source_text='#include <elf.h>',
++        cc=cc,
++        macro_re='|'.join(
++            prefix + '.*' for prefix in expected_constant_prefixes))
++
++# The first part of the pair is a name of an <elf.h> constant that is
++# dropped from glibcelf.  The second part is the constant as it is
++# used in <elf.h>.
++glibcelf_skipped_aliases = (
++    ('EM_ARC_A5', 'EM_ARC_COMPACT'),
++    ('PF_PARISC_SBP', 'PF_HP_SBP')
++)
++
++# Constants that provide little value and are not included in
++# glibcelf: *LO*/*HI* range constants, *NUM constants counting the
++# number of constants.  Also includes the alias names from
++# glibcelf_skipped_aliases.
++glibcelf_skipped_constants = frozenset(
++    [e[0] for e in glibcelf_skipped_aliases]) | frozenset("""
++DT_AARCH64_NUM
++DT_ADDRNUM
++DT_ADDRRNGHI
++DT_ADDRRNGLO
++DT_ALPHA_NUM
++DT_ENCODING
++DT_EXTRANUM
++DT_HIOS
++DT_HIPROC
++DT_IA_64_NUM
++DT_LOOS
++DT_LOPROC
++DT_MIPS_NUM
++DT_NUM
++DT_PPC64_NUM
++DT_PPC_NUM
++DT_PROCNUM
++DT_SPARC_NUM
++DT_VALNUM
++DT_VALRNGHI
++DT_VALRNGLO
++DT_VERSIONTAGNUM
++ELFCLASSNUM
++ELFDATANUM
++ET_HIOS
++ET_HIPROC
++ET_LOOS
++ET_LOPROC
++ET_NUM
++PF_MASKOS
++PF_MASKPROC
++PT_HIOS
++PT_HIPROC
++PT_HISUNW
++PT_LOOS
++PT_LOPROC
++PT_LOSUNW
++SHF_MASKOS
++SHF_MASKPROC
++SHN_HIOS
++SHN_HIPROC
++SHN_HIRESERVE
++SHN_LOOS
++SHN_LOPROC
++SHN_LORESERVE
++SHT_HIOS
++SHT_HIPROC
++SHT_HIPROC
++SHT_HISUNW
++SHT_HIUSER
++SHT_LOOS
++SHT_LOPROC
++SHT_LOSUNW
++SHT_LOUSER
++SHT_NUM
++STB_HIOS
++STB_HIPROC
++STB_LOOS
++STB_LOPROC
++STB_NUM
++STT_HIOS
++STT_HIPROC
++STT_LOOS
++STT_LOPROC
++STT_NUM
++""".strip().split())
++
++def check_constant_values(cc):
++    """Checks the values of <elf.h> constants against glibcelf."""
++
++    glibcelf_constants = {
++        e.name: e for typ in find_enum_types() for e in typ}
++    elf_h_constants = find_elf_h_constants(cc=cc)
++
++    missing_in_glibcelf = (set(elf_h_constants) - set(glibcelf_constants)
++                           - glibcelf_skipped_constants)
++    for name in sorted(missing_in_glibcelf):
++        error('constant {} is missing from glibcelf'.format(name))
++
++    unexpected_in_glibcelf = \
++        set(glibcelf_constants) & glibcelf_skipped_constants
++    for name in sorted(unexpected_in_glibcelf):
++        error('constant {} is supposed to be filtered from glibcelf'.format(
++            name))
++
++    missing_in_elf_h = set(glibcelf_constants) - set(elf_h_constants)
++    for name in sorted(missing_in_elf_h):
++        error('constant {} is missing from <elf.h>'.format(name))
++
++    expected_in_elf_h = glibcelf_skipped_constants - set(elf_h_constants)
++    for name in expected_in_elf_h:
++        error('filtered constant {} is missing from <elf.h>'.format(name))
++
++    for alias_name, name_in_glibcelf in glibcelf_skipped_aliases:
++        if name_in_glibcelf not in glibcelf_constants:
++            error('alias value {} for {} not in glibcelf'.format(
++                name_in_glibcelf, alias_name))
++        elif (int(elf_h_constants[alias_name])
++              != glibcelf_constants[name_in_glibcelf].value):
++            error('<elf.h> has {}={}, glibcelf has {}={}'.format(
++                alias_name, elf_h_constants[alias_name],
++                name_in_glibcelf, glibcelf_constants[name_in_glibcelf]))
++
++    # Check for value mismatches:
++    for name in sorted(set(glibcelf_constants) & set(elf_h_constants)):
++        glibcelf_value = glibcelf_constants[name].value
++        elf_h_value = int(elf_h_constants[name])
++        # On 32-bit architectures <elf.h> has some constants that are
++        # parsed as signed, while they are unsigned in glibcelf.  So
++        # far, this only affects some flag constants, so special-case
++        # them here.
++        if (glibcelf_value != elf_h_value
++            and not (isinstance(glibcelf_constants[name], enum.IntFlag)
++                     and glibcelf_value == 1 << 31
++                     and elf_h_value == -(1 << 31))):
++            error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
++                name, glibcelf_value, elf_h_value))
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Check glibcelf.py and elf.h against each other.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++
++    check_duplicates()
++    check_constant_prefixes()
++    check_constant_values(cc=args.cc)
++
++    if errors_encountered > 0:
++        print("note: errors encountered:", errors_encountered)
++        sys.exit(1)
++
++if __name__ == '__main__':
++    main()
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+new file mode 100644
+index 0000000000000000..8f7d0ca184845714
+--- /dev/null
++++ b/scripts/glibcelf.py
+@@ -0,0 +1,1135 @@
++#!/usr/bin/python3
++# ELF support functionality for Python.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""Basic ELF parser.
++
++Use Image.readfile(path) to read an ELF file into memory and begin
++parsing it.
++
++"""
++
++import collections
++import enum
++import struct
++
++class _OpenIntEnum(enum.IntEnum):
++    """Integer enumeration that supports arbitrary int values."""
++    @classmethod
++    def _missing_(cls, value):
++        # See enum.IntFlag._create_pseudo_member_.  This allows
++        # the creation of enum constants with arbitrary integer values.
++        pseudo_member = int.__new__(cls, value)
++        pseudo_member._name_ = None
++        pseudo_member._value_ = value
++        return pseudo_member
++
++    def __repr__(self):
++        name = self._name_
++        if name is not None:
++            # The names have prefixes like SHT_, implying their type.
++            return name
++        return '{}({})'.format(self.__class__.__name__, self._value_)
++
++    def __str__(self):
++        name = self._name_
++        if name is not None:
++            return name
++        return str(self._value_)
++
++class ElfClass(_OpenIntEnum):
++    """ELF word size.  Type of EI_CLASS values."""
++    ELFCLASSNONE = 0
++    ELFCLASS32 = 1
++    ELFCLASS64 = 2
++
++class ElfData(_OpenIntEnum):
++    """ELF endianess.  Type of EI_DATA values."""
++    ELFDATANONE = 0
++    ELFDATA2LSB = 1
++    ELFDATA2MSB = 2
++
++class Machine(_OpenIntEnum):
++    """ELF machine type.  Type of values in Ehdr.e_machine field."""
++    EM_NONE = 0
++    EM_M32 = 1
++    EM_SPARC = 2
++    EM_386 = 3
++    EM_68K = 4
++    EM_88K = 5
++    EM_IAMCU = 6
++    EM_860 = 7
++    EM_MIPS = 8
++    EM_S370 = 9
++    EM_MIPS_RS3_LE = 10
++    EM_PARISC = 15
++    EM_VPP500 = 17
++    EM_SPARC32PLUS = 18
++    EM_960 = 19
++    EM_PPC = 20
++    EM_PPC64 = 21
++    EM_S390 = 22
++    EM_SPU = 23
++    EM_V800 = 36
++    EM_FR20 = 37
++    EM_RH32 = 38
++    EM_RCE = 39
++    EM_ARM = 40
++    EM_FAKE_ALPHA = 41
++    EM_SH = 42
++    EM_SPARCV9 = 43
++    EM_TRICORE = 44
++    EM_ARC = 45
++    EM_H8_300 = 46
++    EM_H8_300H = 47
++    EM_H8S = 48
++    EM_H8_500 = 49
++    EM_IA_64 = 50
++    EM_MIPS_X = 51
++    EM_COLDFIRE = 52
++    EM_68HC12 = 53
++    EM_MMA = 54
++    EM_PCP = 55
++    EM_NCPU = 56
++    EM_NDR1 = 57
++    EM_STARCORE = 58
++    EM_ME16 = 59
++    EM_ST100 = 60
++    EM_TINYJ = 61
++    EM_X86_64 = 62
++    EM_PDSP = 63
++    EM_PDP10 = 64
++    EM_PDP11 = 65
++    EM_FX66 = 66
++    EM_ST9PLUS = 67
++    EM_ST7 = 68
++    EM_68HC16 = 69
++    EM_68HC11 = 70
++    EM_68HC08 = 71
++    EM_68HC05 = 72
++    EM_SVX = 73
++    EM_ST19 = 74
++    EM_VAX = 75
++    EM_CRIS = 76
++    EM_JAVELIN = 77
++    EM_FIREPATH = 78
++    EM_ZSP = 79
++    EM_MMIX = 80
++    EM_HUANY = 81
++    EM_PRISM = 82
++    EM_AVR = 83
++    EM_FR30 = 84
++    EM_D10V = 85
++    EM_D30V = 86
++    EM_V850 = 87
++    EM_M32R = 88
++    EM_MN10300 = 89
++    EM_MN10200 = 90
++    EM_PJ = 91
++    EM_OPENRISC = 92
++    EM_ARC_COMPACT = 93
++    EM_XTENSA = 94
++    EM_VIDEOCORE = 95
++    EM_TMM_GPP = 96
++    EM_NS32K = 97
++    EM_TPC = 98
++    EM_SNP1K = 99
++    EM_ST200 = 100
++    EM_IP2K = 101
++    EM_MAX = 102
++    EM_CR = 103
++    EM_F2MC16 = 104
++    EM_MSP430 = 105
++    EM_BLACKFIN = 106
++    EM_SE_C33 = 107
++    EM_SEP = 108
++    EM_ARCA = 109
++    EM_UNICORE = 110
++    EM_EXCESS = 111
++    EM_DXP = 112
++    EM_ALTERA_NIOS2 = 113
++    EM_CRX = 114
++    EM_XGATE = 115
++    EM_C166 = 116
++    EM_M16C = 117
++    EM_DSPIC30F = 118
++    EM_CE = 119
++    EM_M32C = 120
++    EM_TSK3000 = 131
++    EM_RS08 = 132
++    EM_SHARC = 133
++    EM_ECOG2 = 134
++    EM_SCORE7 = 135
++    EM_DSP24 = 136
++    EM_VIDEOCORE3 = 137
++    EM_LATTICEMICO32 = 138
++    EM_SE_C17 = 139
++    EM_TI_C6000 = 140
++    EM_TI_C2000 = 141
++    EM_TI_C5500 = 142
++    EM_TI_ARP32 = 143
++    EM_TI_PRU = 144
++    EM_MMDSP_PLUS = 160
++    EM_CYPRESS_M8C = 161
++    EM_R32C = 162
++    EM_TRIMEDIA = 163
++    EM_QDSP6 = 164
++    EM_8051 = 165
++    EM_STXP7X = 166
++    EM_NDS32 = 167
++    EM_ECOG1X = 168
++    EM_MAXQ30 = 169
++    EM_XIMO16 = 170
++    EM_MANIK = 171
++    EM_CRAYNV2 = 172
++    EM_RX = 173
++    EM_METAG = 174
++    EM_MCST_ELBRUS = 175
++    EM_ECOG16 = 176
++    EM_CR16 = 177
++    EM_ETPU = 178
++    EM_SLE9X = 179
++    EM_L10M = 180
++    EM_K10M = 181
++    EM_AARCH64 = 183
++    EM_AVR32 = 185
++    EM_STM8 = 186
++    EM_TILE64 = 187
++    EM_TILEPRO = 188
++    EM_MICROBLAZE = 189
++    EM_CUDA = 190
++    EM_TILEGX = 191
++    EM_CLOUDSHIELD = 192
++    EM_COREA_1ST = 193
++    EM_COREA_2ND = 194
++    EM_ARCV2 = 195
++    EM_OPEN8 = 196
++    EM_RL78 = 197
++    EM_VIDEOCORE5 = 198
++    EM_78KOR = 199
++    EM_56800EX = 200
++    EM_BA1 = 201
++    EM_BA2 = 202
++    EM_XCORE = 203
++    EM_MCHP_PIC = 204
++    EM_INTELGT = 205
++    EM_KM32 = 210
++    EM_KMX32 = 211
++    EM_EMX16 = 212
++    EM_EMX8 = 213
++    EM_KVARC = 214
++    EM_CDP = 215
++    EM_COGE = 216
++    EM_COOL = 217
++    EM_NORC = 218
++    EM_CSR_KALIMBA = 219
++    EM_Z80 = 220
++    EM_VISIUM = 221
++    EM_FT32 = 222
++    EM_MOXIE = 223
++    EM_AMDGPU = 224
++    EM_RISCV = 243
++    EM_BPF = 247
++    EM_CSKY = 252
++    EM_NUM = 253
++    EM_ALPHA = 0x9026
++
++class Et(_OpenIntEnum):
++    """ELF file type.  Type of ET_* values and the Ehdr.e_type field."""
++    ET_NONE = 0
++    ET_REL = 1
++    ET_EXEC = 2
++    ET_DYN = 3
++    ET_CORE = 4
++
++class Shn(_OpenIntEnum):
++    """ELF reserved section indices."""
++    SHN_UNDEF = 0
++    SHN_BEFORE = 0xff00
++    SHN_AFTER = 0xff01
++    SHN_ABS = 0xfff1
++    SHN_COMMON = 0xfff2
++    SHN_XINDEX = 0xffff
++
++class ShnMIPS(enum.Enum):
++    """Supplemental SHN_* constants for EM_MIPS."""
++    SHN_MIPS_ACOMMON = 0xff00
++    SHN_MIPS_TEXT = 0xff01
++    SHN_MIPS_DATA = 0xff02
++    SHN_MIPS_SCOMMON = 0xff03
++    SHN_MIPS_SUNDEFINED = 0xff04
++
++class ShnPARISC(enum.Enum):
++    """Supplemental SHN_* constants for EM_PARISC."""
++    SHN_PARISC_ANSI_COMMON = 0xff00
++    SHN_PARISC_HUGE_COMMON = 0xff01
++
++class Sht(_OpenIntEnum):
++    """ELF section types.  Type of SHT_* values."""
++    SHT_NULL = 0
++    SHT_PROGBITS = 1
++    SHT_SYMTAB = 2
++    SHT_STRTAB = 3
++    SHT_RELA = 4
++    SHT_HASH = 5
++    SHT_DYNAMIC = 6
++    SHT_NOTE = 7
++    SHT_NOBITS = 8
++    SHT_REL = 9
++    SHT_SHLIB = 10
++    SHT_DYNSYM = 11
++    SHT_INIT_ARRAY = 14
++    SHT_FINI_ARRAY = 15
++    SHT_PREINIT_ARRAY = 16
++    SHT_GROUP = 17
++    SHT_SYMTAB_SHNDX = 18
++    SHT_GNU_ATTRIBUTES = 0x6ffffff5
++    SHT_GNU_HASH = 0x6ffffff6
++    SHT_GNU_LIBLIST = 0x6ffffff7
++    SHT_CHECKSUM = 0x6ffffff8
++    SHT_SUNW_move = 0x6ffffffa
++    SHT_SUNW_COMDAT = 0x6ffffffb
++    SHT_SUNW_syminfo = 0x6ffffffc
++    SHT_GNU_verdef = 0x6ffffffd
++    SHT_GNU_verneed = 0x6ffffffe
++    SHT_GNU_versym = 0x6fffffff
++
++class ShtALPHA(enum.Enum):
++    """Supplemental SHT_* constants for EM_ALPHA."""
++    SHT_ALPHA_DEBUG = 0x70000001
++    SHT_ALPHA_REGINFO = 0x70000002
++
++class ShtARM(enum.Enum):
++    """Supplemental SHT_* constants for EM_ARM."""
++    SHT_ARM_EXIDX = 0x70000001
++    SHT_ARM_PREEMPTMAP = 0x70000002
++    SHT_ARM_ATTRIBUTES = 0x70000003
++
++class ShtCSKY(enum.Enum):
++    """Supplemental SHT_* constants for EM_CSKY."""
++    SHT_CSKY_ATTRIBUTES = 0x70000001
++
++class ShtIA_64(enum.Enum):
++    """Supplemental SHT_* constants for EM_IA_64."""
++    SHT_IA_64_EXT = 0x70000000
++    SHT_IA_64_UNWIND = 0x70000001
++
++class ShtMIPS(enum.Enum):
++    """Supplemental SHT_* constants for EM_MIPS."""
++    SHT_MIPS_LIBLIST = 0x70000000
++    SHT_MIPS_MSYM = 0x70000001
++    SHT_MIPS_CONFLICT = 0x70000002
++    SHT_MIPS_GPTAB = 0x70000003
++    SHT_MIPS_UCODE = 0x70000004
++    SHT_MIPS_DEBUG = 0x70000005
++    SHT_MIPS_REGINFO = 0x70000006
++    SHT_MIPS_PACKAGE = 0x70000007
++    SHT_MIPS_PACKSYM = 0x70000008
++    SHT_MIPS_RELD = 0x70000009
++    SHT_MIPS_IFACE = 0x7000000b
++    SHT_MIPS_CONTENT = 0x7000000c
++    SHT_MIPS_OPTIONS = 0x7000000d
++    SHT_MIPS_SHDR = 0x70000010
++    SHT_MIPS_FDESC = 0x70000011
++    SHT_MIPS_EXTSYM = 0x70000012
++    SHT_MIPS_DENSE = 0x70000013
++    SHT_MIPS_PDESC = 0x70000014
++    SHT_MIPS_LOCSYM = 0x70000015
++    SHT_MIPS_AUXSYM = 0x70000016
++    SHT_MIPS_OPTSYM = 0x70000017
++    SHT_MIPS_LOCSTR = 0x70000018
++    SHT_MIPS_LINE = 0x70000019
++    SHT_MIPS_RFDESC = 0x7000001a
++    SHT_MIPS_DELTASYM = 0x7000001b
++    SHT_MIPS_DELTAINST = 0x7000001c
++    SHT_MIPS_DELTACLASS = 0x7000001d
++    SHT_MIPS_DWARF = 0x7000001e
++    SHT_MIPS_DELTADECL = 0x7000001f
++    SHT_MIPS_SYMBOL_LIB = 0x70000020
++    SHT_MIPS_EVENTS = 0x70000021
++    SHT_MIPS_TRANSLATE = 0x70000022
++    SHT_MIPS_PIXIE = 0x70000023
++    SHT_MIPS_XLATE = 0x70000024
++    SHT_MIPS_XLATE_DEBUG = 0x70000025
++    SHT_MIPS_WHIRL = 0x70000026
++    SHT_MIPS_EH_REGION = 0x70000027
++    SHT_MIPS_XLATE_OLD = 0x70000028
++    SHT_MIPS_PDR_EXCEPTION = 0x70000029
++    SHT_MIPS_XHASH = 0x7000002b
++
++class ShtPARISC(enum.Enum):
++    """Supplemental SHT_* constants for EM_PARISC."""
++    SHT_PARISC_EXT = 0x70000000
++    SHT_PARISC_UNWIND = 0x70000001
++    SHT_PARISC_DOC = 0x70000002
++
++class Pf(enum.IntFlag):
++    """Program header flags.  Type of Phdr.p_flags values."""
++    PF_X = 1
++    PF_W = 2
++    PF_R = 4
++
++class PfARM(enum.IntFlag):
++    """Supplemental PF_* flags for EM_ARM."""
++    PF_ARM_SB = 0x10000000
++    PF_ARM_PI = 0x20000000
++    PF_ARM_ABS = 0x40000000
++
++class PfPARISC(enum.IntFlag):
++    """Supplemental PF_* flags for EM_PARISC."""
++    PF_HP_PAGE_SIZE = 0x00100000
++    PF_HP_FAR_SHARED = 0x00200000
++    PF_HP_NEAR_SHARED = 0x00400000
++    PF_HP_CODE = 0x01000000
++    PF_HP_MODIFY = 0x02000000
++    PF_HP_LAZYSWAP = 0x04000000
++    PF_HP_SBP = 0x08000000
++
++class PfIA_64(enum.IntFlag):
++    """Supplemental PF_* flags for EM_IA_64."""
++    PF_IA_64_NORECOV = 0x80000000
++
++class PfMIPS(enum.IntFlag):
++    """Supplemental PF_* flags for EM_MIPS."""
++    PF_MIPS_LOCAL = 0x10000000
++
++class Shf(enum.IntFlag):
++    """Section flags.  Type of Shdr.sh_type values."""
++    SHF_WRITE = 1 << 0
++    SHF_ALLOC = 1 << 1
++    SHF_EXECINSTR = 1 << 2
++    SHF_MERGE = 1 << 4
++    SHF_STRINGS = 1 << 5
++    SHF_INFO_LINK = 1 << 6
++    SHF_LINK_ORDER = 1 << 7
++    SHF_OS_NONCONFORMING = 256
++    SHF_GROUP = 1 << 9
++    SHF_TLS = 1 << 10
++    SHF_COMPRESSED = 1 << 11
++    SHF_GNU_RETAIN = 1 << 21
++    SHF_ORDERED = 1 << 30
++    SHF_EXCLUDE = 1 << 31
++
++class ShfALPHA(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_ALPHA."""
++    SHF_ALPHA_GPREL = 0x10000000
++
++class ShfARM(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_ARM."""
++    SHF_ARM_ENTRYSECT = 0x10000000
++    SHF_ARM_COMDEF = 0x80000000
++
++class ShfIA_64(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_IA_64."""
++    SHF_IA_64_SHORT  = 0x10000000
++    SHF_IA_64_NORECOV = 0x20000000
++
++class ShfMIPS(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_MIPS."""
++    SHF_MIPS_GPREL = 0x10000000
++    SHF_MIPS_MERGE = 0x20000000
++    SHF_MIPS_ADDR = 0x40000000
++    SHF_MIPS_STRINGS = 0x80000000
++    SHF_MIPS_NOSTRIP = 0x08000000
++    SHF_MIPS_LOCAL = 0x04000000
++    SHF_MIPS_NAMES = 0x02000000
++    SHF_MIPS_NODUPE = 0x01000000
++
++class ShfPARISC(enum.IntFlag):
++    """Supplemental SHF_* constants for EM_PARISC."""
++    SHF_PARISC_SHORT = 0x20000000
++    SHF_PARISC_HUGE = 0x40000000
++    SHF_PARISC_SBP = 0x80000000
++
++class Stb(_OpenIntEnum):
++    """ELF symbol binding type."""
++    STB_LOCAL = 0
++    STB_GLOBAL = 1
++    STB_WEAK = 2
++    STB_GNU_UNIQUE = 10
++    STB_MIPS_SPLIT_COMMON = 13
++
++class Stt(_OpenIntEnum):
++    """ELF symbol type."""
++    STT_NOTYPE = 0
++    STT_OBJECT = 1
++    STT_FUNC = 2
++    STT_SECTION = 3
++    STT_FILE = 4
++    STT_COMMON = 5
++    STT_TLS = 6
++    STT_GNU_IFUNC = 10
++
++class SttARM(enum.Enum):
++    """Supplemental STT_* constants for EM_ARM."""
++    STT_ARM_TFUNC = 13
++    STT_ARM_16BIT = 15
++
++class SttPARISC(enum.Enum):
++    """Supplemental STT_* constants for EM_PARISC."""
++    STT_HP_OPAQUE = 11
++    STT_HP_STUB = 12
++    STT_PARISC_MILLICODE = 13
++
++class SttSPARC(enum.Enum):
++    """Supplemental STT_* constants for EM_SPARC."""
++    STT_SPARC_REGISTER = 13
++
++class SttX86_64(enum.Enum):
++    """Supplemental STT_* constants for EM_X86_64."""
++    SHT_X86_64_UNWIND = 0x70000001
++
++class Pt(_OpenIntEnum):
++    """ELF program header types.  Type of Phdr.p_type."""
++    PT_NULL = 0
++    PT_LOAD = 1
++    PT_DYNAMIC = 2
++    PT_INTERP = 3
++    PT_NOTE = 4
++    PT_SHLIB = 5
++    PT_PHDR = 6
++    PT_TLS = 7
++    PT_NUM = 8
++    PT_GNU_EH_FRAME = 0x6474e550
++    PT_GNU_STACK = 0x6474e551
++    PT_GNU_RELRO = 0x6474e552
++    PT_GNU_PROPERTY = 0x6474e553
++    PT_SUNWBSS = 0x6ffffffa
++    PT_SUNWSTACK = 0x6ffffffb
++
++class PtARM(enum.Enum):
++    """Supplemental PT_* constants for EM_ARM."""
++    PT_ARM_EXIDX = 0x70000001
++
++class PtIA_64(enum.Enum):
++    """Supplemental PT_* constants for EM_IA_64."""
++    PT_IA_64_HP_OPT_ANOT = 0x60000012
++    PT_IA_64_HP_HSL_ANOT = 0x60000013
++    PT_IA_64_HP_STACK = 0x60000014
++    PT_IA_64_ARCHEXT = 0x70000000
++    PT_IA_64_UNWIND = 0x70000001
++
++class PtMIPS(enum.Enum):
++    """Supplemental PT_* constants for EM_MIPS."""
++    PT_MIPS_REGINFO = 0x70000000
++    PT_MIPS_RTPROC = 0x70000001
++    PT_MIPS_OPTIONS = 0x70000002
++    PT_MIPS_ABIFLAGS = 0x70000003
++
++class PtPARISC(enum.Enum):
++    """Supplemental PT_* constants for EM_PARISC."""
++    PT_HP_TLS = 0x60000000
++    PT_HP_CORE_NONE = 0x60000001
++    PT_HP_CORE_VERSION = 0x60000002
++    PT_HP_CORE_KERNEL = 0x60000003
++    PT_HP_CORE_COMM = 0x60000004
++    PT_HP_CORE_PROC = 0x60000005
++    PT_HP_CORE_LOADABLE = 0x60000006
++    PT_HP_CORE_STACK = 0x60000007
++    PT_HP_CORE_SHM = 0x60000008
++    PT_HP_CORE_MMF = 0x60000009
++    PT_HP_PARALLEL = 0x60000010
++    PT_HP_FASTBIND = 0x60000011
++    PT_HP_OPT_ANNOT = 0x60000012
++    PT_HP_HSL_ANNOT = 0x60000013
++    PT_HP_STACK = 0x60000014
++    PT_PARISC_ARCHEXT = 0x70000000
++    PT_PARISC_UNWIND = 0x70000001
++
++class Dt(_OpenIntEnum):
++    """ELF dynamic segment tags.  Type of Dyn.d_val."""
++    DT_NULL = 0
++    DT_NEEDED = 1
++    DT_PLTRELSZ = 2
++    DT_PLTGOT = 3
++    DT_HASH = 4
++    DT_STRTAB = 5
++    DT_SYMTAB = 6
++    DT_RELA = 7
++    DT_RELASZ = 8
++    DT_RELAENT = 9
++    DT_STRSZ = 10
++    DT_SYMENT = 11
++    DT_INIT = 12
++    DT_FINI = 13
++    DT_SONAME = 14
++    DT_RPATH = 15
++    DT_SYMBOLIC = 16
++    DT_REL = 17
++    DT_RELSZ = 18
++    DT_RELENT = 19
++    DT_PLTREL = 20
++    DT_DEBUG = 21
++    DT_TEXTREL = 22
++    DT_JMPREL = 23
++    DT_BIND_NOW = 24
++    DT_INIT_ARRAY = 25
++    DT_FINI_ARRAY = 26
++    DT_INIT_ARRAYSZ = 27
++    DT_FINI_ARRAYSZ = 28
++    DT_RUNPATH = 29
++    DT_FLAGS = 30
++    DT_PREINIT_ARRAY = 32
++    DT_PREINIT_ARRAYSZ = 33
++    DT_SYMTAB_SHNDX = 34
++    DT_GNU_PRELINKED = 0x6ffffdf5
++    DT_GNU_CONFLICTSZ = 0x6ffffdf6
++    DT_GNU_LIBLISTSZ = 0x6ffffdf7
++    DT_CHECKSUM = 0x6ffffdf8
++    DT_PLTPADSZ = 0x6ffffdf9
++    DT_MOVEENT = 0x6ffffdfa
++    DT_MOVESZ = 0x6ffffdfb
++    DT_FEATURE_1 = 0x6ffffdfc
++    DT_POSFLAG_1 = 0x6ffffdfd
++    DT_SYMINSZ = 0x6ffffdfe
++    DT_SYMINENT = 0x6ffffdff
++    DT_GNU_HASH = 0x6ffffef5
++    DT_TLSDESC_PLT = 0x6ffffef6
++    DT_TLSDESC_GOT = 0x6ffffef7
++    DT_GNU_CONFLICT = 0x6ffffef8
++    DT_GNU_LIBLIST = 0x6ffffef9
++    DT_CONFIG = 0x6ffffefa
++    DT_DEPAUDIT = 0x6ffffefb
++    DT_AUDIT = 0x6ffffefc
++    DT_PLTPAD = 0x6ffffefd
++    DT_MOVETAB = 0x6ffffefe
++    DT_SYMINFO = 0x6ffffeff
++    DT_VERSYM = 0x6ffffff0
++    DT_RELACOUNT = 0x6ffffff9
++    DT_RELCOUNT = 0x6ffffffa
++    DT_FLAGS_1 = 0x6ffffffb
++    DT_VERDEF = 0x6ffffffc
++    DT_VERDEFNUM = 0x6ffffffd
++    DT_VERNEED = 0x6ffffffe
++    DT_VERNEEDNUM = 0x6fffffff
++    DT_AUXILIARY = 0x7ffffffd
++    DT_FILTER = 0x7fffffff
++
++class DtAARCH64(enum.Enum):
++    """Supplemental DT_* constants for EM_AARCH64."""
++    DT_AARCH64_BTI_PLT = 0x70000001
++    DT_AARCH64_PAC_PLT = 0x70000003
++    DT_AARCH64_VARIANT_PCS = 0x70000005
++
++class DtALPHA(enum.Enum):
++    """Supplemental DT_* constants for EM_ALPHA."""
++    DT_ALPHA_PLTRO = 0x70000000
++
++class DtALTERA_NIOS2(enum.Enum):
++    """Supplemental DT_* constants for EM_ALTERA_NIOS2."""
++    DT_NIOS2_GP = 0x70000002
++
++class DtIA_64(enum.Enum):
++    """Supplemental DT_* constants for EM_IA_64."""
++    DT_IA_64_PLT_RESERVE = 0x70000000
++
++class DtMIPS(enum.Enum):
++    """Supplemental DT_* constants for EM_MIPS."""
++    DT_MIPS_RLD_VERSION = 0x70000001
++    DT_MIPS_TIME_STAMP = 0x70000002
++    DT_MIPS_ICHECKSUM = 0x70000003
++    DT_MIPS_IVERSION = 0x70000004
++    DT_MIPS_FLAGS = 0x70000005
++    DT_MIPS_BASE_ADDRESS = 0x70000006
++    DT_MIPS_MSYM = 0x70000007
++    DT_MIPS_CONFLICT = 0x70000008
++    DT_MIPS_LIBLIST = 0x70000009
++    DT_MIPS_LOCAL_GOTNO = 0x7000000a
++    DT_MIPS_CONFLICTNO = 0x7000000b
++    DT_MIPS_LIBLISTNO = 0x70000010
++    DT_MIPS_SYMTABNO = 0x70000011
++    DT_MIPS_UNREFEXTNO = 0x70000012
++    DT_MIPS_GOTSYM = 0x70000013
++    DT_MIPS_HIPAGENO = 0x70000014
++    DT_MIPS_RLD_MAP = 0x70000016
++    DT_MIPS_DELTA_CLASS = 0x70000017
++    DT_MIPS_DELTA_CLASS_NO = 0x70000018
++    DT_MIPS_DELTA_INSTANCE = 0x70000019
++    DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a
++    DT_MIPS_DELTA_RELOC = 0x7000001b
++    DT_MIPS_DELTA_RELOC_NO = 0x7000001c
++    DT_MIPS_DELTA_SYM = 0x7000001d
++    DT_MIPS_DELTA_SYM_NO = 0x7000001e
++    DT_MIPS_DELTA_CLASSSYM = 0x70000020
++    DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021
++    DT_MIPS_CXX_FLAGS = 0x70000022
++    DT_MIPS_PIXIE_INIT = 0x70000023
++    DT_MIPS_SYMBOL_LIB = 0x70000024
++    DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025
++    DT_MIPS_LOCAL_GOTIDX = 0x70000026
++    DT_MIPS_HIDDEN_GOTIDX = 0x70000027
++    DT_MIPS_PROTECTED_GOTIDX = 0x70000028
++    DT_MIPS_OPTIONS = 0x70000029
++    DT_MIPS_INTERFACE = 0x7000002a
++    DT_MIPS_DYNSTR_ALIGN = 0x7000002b
++    DT_MIPS_INTERFACE_SIZE = 0x7000002c
++    DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d
++    DT_MIPS_PERF_SUFFIX = 0x7000002e
++    DT_MIPS_COMPACT_SIZE = 0x7000002f
++    DT_MIPS_GP_VALUE = 0x70000030
++    DT_MIPS_AUX_DYNAMIC = 0x70000031
++    DT_MIPS_PLTGOT = 0x70000032
++    DT_MIPS_RWPLT = 0x70000034
++    DT_MIPS_RLD_MAP_REL = 0x70000035
++    DT_MIPS_XHASH = 0x70000036
++
++class DtPPC(enum.Enum):
++    """Supplemental DT_* constants for EM_PPC."""
++    DT_PPC_GOT = 0x70000000
++    DT_PPC_OPT = 0x70000001
++
++class DtPPC64(enum.Enum):
++    """Supplemental DT_* constants for EM_PPC64."""
++    DT_PPC64_GLINK = 0x70000000
++    DT_PPC64_OPD = 0x70000001
++    DT_PPC64_OPDSZ = 0x70000002
++    DT_PPC64_OPT = 0x70000003
++
++class DtSPARC(enum.Enum):
++    """Supplemental DT_* constants for EM_SPARC."""
++    DT_SPARC_REGISTER = 0x70000001
++
++class StInfo:
++    """ELF symbol binding and type.  Type of the Sym.st_info field."""
++    def __init__(self, arg0, arg1=None):
++        if isinstance(arg0, int) and arg1 is None:
++            self.bind = Stb(arg0 >> 4)
++            self.type = Stt(arg0 & 15)
++        else:
++            self.bind = Stb(arg0)
++            self.type = Stt(arg1)
++
++    def value(self):
++        """Returns the raw value for the bind/type combination."""
++        return (self.bind.value << 4) | (self.type.value)
++
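++# Illustrative sketch (editor's addition, not part of the upstream
++# module): st_info keeps the binding in the upper nibble and the type
++# in the lower nibble, so STB_GLOBAL (1) and STT_FUNC (2) combine to
++# 0x12.
++#
++#     info = StInfo(0x12)
++#     assert info.bind == Stb.STB_GLOBAL and info.type == Stt.STT_FUNC
++#     assert StInfo(Stb.STB_GLOBAL, Stt.STT_FUNC).value() == 0x12
++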
++# Type in an ELF file.  Used for deserialization.
++_Layout = collections.namedtuple('_Layout', 'unpack size')
++
++def _define_layouts(baseclass: type, layout32: str, layout64: str,
++                    types=None, fields32=None):
++    """Assign variants dict to baseclass.
++
++    The variants dict is indexed by (ElfClass, ElfData) pairs, and its
++    values are _Layout instances.
++
++    """
++    struct32 = struct.Struct(layout32)
++    struct64 = struct.Struct(layout64)
++
++    # Check that the struct formats yield the right number of components.
++    for s in (struct32, struct64):
++        example = s.unpack(b' ' * s.size)
++        if len(example) != len(baseclass._fields):
++            raise ValueError('{!r} yields wrong field count: {} != {}'.format(
++                s.format, len(example),  len(baseclass._fields)))
++
++    # Check that field names in types are correct.
++    if types is None:
++        types = ()
++    for n in types:
++        if n not in baseclass._fields:
++            raise ValueError('{} does not have field {!r}'.format(
++                baseclass.__name__, n))
++
++    if fields32 is not None \
++       and set(fields32) != set(baseclass._fields):
++        raise ValueError('{!r} is not a permutation of the fields {!r}'.format(
++            fields32, baseclass._fields))
++
++    def unique_name(name, used_names = (set((baseclass.__name__,))
++                                        | set(baseclass._fields)
++                                        | {n.__name__
++                                           for n in (types or {}).values()})):
++        """Find a name that is not used for a class or field name."""
++        candidate = name
++        n = 0
++        while candidate in used_names:
++            n += 1
++            candidate = '{}{}'.format(name, n)
++        used_names.add(candidate)
++        return candidate
++
++    blob_name = unique_name('blob')
++    struct_unpack_name = unique_name('struct_unpack')
++    comps_name = unique_name('comps')
++
++    layouts = {}
++    for (bits, elfclass, layout, fields) in (
++            (32, ElfClass.ELFCLASS32, layout32, fields32),
++            (64, ElfClass.ELFCLASS64, layout64, None),
++    ):
++        for (elfdata, structprefix, funcsuffix) in (
++                (ElfData.ELFDATA2LSB, '<', 'LE'),
++                (ElfData.ELFDATA2MSB, '>', 'BE'),
++        ):
++            env = {
++                baseclass.__name__: baseclass,
++                struct_unpack_name: struct.unpack,
++            }
++
++            # Add the type converters.
++            if types:
++                for cls in types.values():
++                    env[cls.__name__] = cls
++
++            funcname = ''.join(
++                ('unpack_', baseclass.__name__, str(bits), funcsuffix))
++
++            code = '''
++def {funcname}({blob_name}):
++'''.format(funcname=funcname, blob_name=blob_name)
++
++            indent = ' ' * 4
++            unpack_call = '{}({!r}, {})'.format(
++                struct_unpack_name, structprefix + layout, blob_name)
++            field_names = ', '.join(baseclass._fields)
++            if types is None and fields is None:
++                code += '{}return {}({})\n'.format(
++                    indent, baseclass.__name__, unpack_call)
++            else:
++                # Destructuring tuple assignment.
++                if fields is None:
++                    code += '{}{} = {}\n'.format(
++                        indent, field_names, unpack_call)
++                else:
++                    # Use custom field order.
++                    code += '{}{} = {}\n'.format(
++                        indent, ', '.join(fields), unpack_call)
++
++                # Perform the type conversions.
++                for n in baseclass._fields:
++                    if n in types:
++                        code += '{}{} = {}({})\n'.format(
++                            indent, n, types[n].__name__, n)
++                # Create the named tuple.
++                code += '{}return {}({})\n'.format(
++                    indent, baseclass.__name__, field_names)
++
++            exec(code, env)
++            layouts[(elfclass, elfdata)] = _Layout(
++                env[funcname], struct.calcsize(layout))
++    baseclass.layouts = layouts
++
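++# Example of the generated parsers (editor's sketch; blob and offset
++# are hypothetical stand-ins for file data): after the
++# _define_layouts(Phdr, ...) call below, a 64-bit little-endian
++# program header can be decoded with:
++#
++#     layout = Phdr.layouts[(ElfClass.ELFCLASS64, ElfData.ELFDATA2LSB)]
++#     phdr = layout.unpack(blob[offset:offset + layout.size])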
++
++# Corresponds to EI_* indices into Elf*_Ehdr.e_ident.
++class Ident(collections.namedtuple('Ident',
++    'ei_mag ei_class ei_data ei_version ei_osabi ei_abiversion ei_pad')):
++
++    def __new__(cls, *args):
++        """Construct an object from a blob or its constituent fields."""
++        if len(args) == 1:
++            return cls.unpack(args[0])
++        return cls.__base__.__new__(cls, *args)
++
++    @staticmethod
++    def unpack(blob: memoryview) -> 'Ident':
++        """Parse raws data into a tuple."""
++        ei_mag, ei_class, ei_data, ei_version, ei_osabi, ei_abiversion, \
++            ei_pad = struct.unpack('4s5B7s', blob)
++        return Ident(ei_mag, ElfClass(ei_class), ElfData(ei_data),
++                     ei_version, ei_osabi, ei_abiversion, ei_pad)
++    size = 16
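++
++# Usage sketch (editor's illustration, not in the original patch):
++# decoding the identification bytes of a little-endian ELF64 object.
++#
++#     raw = b'\x7fELF\x02\x01\x01\x00\x00' + b'\x00' * 7
++#     ident = Ident(raw)
++#     assert ident.ei_mag == b'\x7fELF'
++#     assert ident.ei_class == ElfClass.ELFCLASS64
++#     assert ident.ei_data == ElfData.ELFDATA2LSB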
++
++# Corresponds to Elf32_Ehdr and Elf64_Ehdr.
++Ehdr = collections.namedtuple('Ehdr',
++   'e_ident e_type e_machine e_version e_entry e_phoff e_shoff e_flags'
++    + ' e_ehsize e_phentsize e_phnum e_shentsize e_shnum e_shstrndx')
++_define_layouts(Ehdr,
++                layout32='16s2H5I6H',
++                layout64='16s2HI3QI6H',
++                types=dict(e_ident=Ident,
++                           e_machine=Machine,
++                           e_type=Et,
++                           e_shstrndx=Shn))
++
++# Corresponds to Elf32_Phdr and Elf64_Phdr.  Order follows the latter.
++Phdr = collections.namedtuple('Phdr',
++    'p_type p_flags p_offset p_vaddr p_paddr p_filesz p_memsz p_align')
++_define_layouts(Phdr,
++                layout32='8I',
++                fields32=('p_type', 'p_offset', 'p_vaddr', 'p_paddr',
++                          'p_filesz', 'p_memsz', 'p_flags', 'p_align'),
++                layout64='2I6Q',
++                types=dict(p_type=Pt, p_flags=Pf))
++
++
++# Corresponds to Elf32_Shdr and Elf64_Shdr.
++class Shdr(collections.namedtuple('Shdr',
++    'sh_name sh_type sh_flags sh_addr sh_offset sh_size sh_link sh_info'
++    + ' sh_addralign sh_entsize')):
++    def resolve(self, strtab: 'StringTable') -> 'Shdr':
++        """Resolve sh_name using a string table."""
++        return self.__class__(strtab.get(self[0]), *self[1:])
++_define_layouts(Shdr,
++                layout32='10I',
++                layout64='2I4Q2I2Q',
++                types=dict(sh_type=Sht,
++                           sh_flags=Shf,
++                           sh_link=Shn))
++
++# Corresponds to Elf32_Dyn and Elf64_Dyn.  The nesting through the
++# d_un union is skipped, and d_ptr is missing (its representation in
++# Python would be identical to d_val).
++Dyn = collections.namedtuple('Dyn', 'd_tag d_val')
++_define_layouts(Dyn,
++                layout32='2i',
++                layout64='2q',
++                types=dict(d_tag=Dt))
++
++# Corresponds to Elf32_Sym and Elf64_Sym.
++class Sym(collections.namedtuple('Sym',
++    'st_name st_info st_other st_shndx st_value st_size')):
++    def resolve(self, strtab: 'StringTable') -> 'Sym':
++        """Resolve st_name using a string table."""
++        return self.__class__(strtab.get(self[0]), *self[1:])
++_define_layouts(Sym,
++                layout32='3I2BH',
++                layout64='I2BH2Q',
++                fields32=('st_name', 'st_value', 'st_size', 'st_info',
++                          'st_other', 'st_shndx'),
++                types=dict(st_shndx=Shn,
++                           st_info=StInfo))
++
++# Corresponds to Elf32_Rel and Elf64_Rel.
++Rel = collections.namedtuple('Rel', 'r_offset r_info')
++_define_layouts(Rel,
++                layout32='2I',
++                layout64='2Q')
++
++# Corresponds to Elf32_Rela and Elf64_Rela.
++Rela = collections.namedtuple('Rela', 'r_offset r_info r_addend')
++_define_layouts(Rela,
++                layout32='3I',
++                layout64='3Q')
++
++class StringTable:
++    """ELF string table."""
++    def __init__(self, blob):
++        """Create a new string table backed by the data in the blob.
++
++        blob: a memoryview-like object
++
++        """
++        self.blob = blob
++
++    def get(self, index) -> bytes:
++        """Returns the null-terminated byte string at the index."""
++        blob = self.blob
++        endindex = index
++        while True:
++            if blob[endindex] == 0:
++                return bytes(blob[index:endindex])
++            endindex += 1
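++
++# Example (editor's addition): ELF string tables are indexed by byte
++# offset and conventionally start with a NUL byte, so:
++#
++#     assert StringTable(b'\x00libc.so.6\x00').get(1) == b'libc.so.6'
++#     assert StringTable(b'\x00libc.so.6\x00').get(0) == b''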
++
++class Image:
++    """ELF image parser."""
++    def __init__(self, image):
++        """Create an ELF image from binary image data.
++
++        image: a memoryview-like object that supports efficient range
++        subscripting.
++
++        """
++        self.image = image
++        ident = self.read(Ident, 0)
++        classdata = (ident.ei_class, ident.ei_data)
++        # Set self.Ehdr etc. to the subtypes with the right parsers.
++        for typ in (Ehdr, Phdr, Shdr, Dyn, Sym, Rel, Rela):
++            setattr(self, typ.__name__, typ.layouts.get(classdata, None))
++
++        if self.Ehdr is not None:
++            self.ehdr = self.read(self.Ehdr, 0)
++            self._shdr_num = self._compute_shdr_num()
++        else:
++            self.ehdr = None
++            self._shdr_num = 0
++
++        self._section = {}
++        self._stringtab = {}
++
++        if self._shdr_num > 0:
++            self._shdr_strtab = self._find_shdr_strtab()
++        else:
++            self._shdr_strtab = None
++
++    @staticmethod
++    def readfile(path: str) -> 'Image':
++        """Reads the ELF file at the specified path."""
++        with open(path, 'rb') as inp:
++            return Image(memoryview(inp.read()))
++
++    def _compute_shdr_num(self) -> int:
++        """Computes the actual number of section headers."""
++        shnum = self.ehdr.e_shnum
++        if shnum == 0:
++            if self.ehdr.e_shoff == 0 or self.ehdr.e_shentsize == 0:
++                # No section headers.
++                return 0
++            # Otherwise the extension mechanism is used (which may be
++            # needed because e_shnum is just 16 bits).
++            return self.read(self.Shdr, self.ehdr.e_shoff).sh_size
++        return shnum
++
++    def _find_shdr_strtab(self) -> StringTable:
++        """Finds the section header string table (maybe via extensions)."""
++        shstrndx = self.ehdr.e_shstrndx
++        if shstrndx == Shn.SHN_XINDEX:
++            shstrndx = self.read(self.Shdr, self.ehdr.e_shoff).sh_link
++        return self._find_stringtab(shstrndx)
++
++    def read(self, typ: type, offset: int):
++        """Reads an object at a specific offset.
++
++        The type must have been enhanced using _define_layouts.
++
++        """
++        return typ.unpack(self.image[offset: offset + typ.size])
++
++    def phdrs(self) -> Phdr:
++        """Generator iterating over the program headers."""
++        if self.ehdr is None:
++            return
++        size = self.ehdr.e_phentsize
++        if size != self.Phdr.size:
++            raise ValueError('Unexpected Phdr size in ELF header: {} != {}'
++                             .format(size, self.Phdr.size))
++
++        offset = self.ehdr.e_phoff
++        for _ in range(self.ehdr.e_phnum):
++            yield self.read(self.Phdr, offset)
++            offset += size
++
++    def shdrs(self, resolve: bool=True) -> Shdr:
++        """Generator iterating over the section headers.
++
++        If resolve, section names are automatically translated
++        using the section header string table.
++
++        """
++        if self._shdr_num == 0:
++            return
++
++        size = self.ehdr.e_shentsize
++        if size != self.Shdr.size:
++            raise ValueError('Unexpected Shdr size in ELF header: {} != {}'
++                             .format(size, self.Shdr.size))
++
++        offset = self.ehdr.e_shoff
++        for _ in range(self._shdr_num):
++            shdr = self.read(self.Shdr, offset)
++            if resolve:
++                shdr = shdr.resolve(self._shdr_strtab)
++            yield shdr
++            offset += size
++
++    def dynamic(self) -> Dyn:
++        """Generator iterating over the dynamic segment."""
++        for phdr in self.phdrs():
++            if phdr.p_type == Pt.PT_DYNAMIC:
++                # Pick the first dynamic segment, like the loader.
++                if phdr.p_filesz == 0:
++                    # Probably separated debuginfo.
++                    return
++                offset = phdr.p_offset
++                end = offset + phdr.p_memsz
++                size = self.Dyn.size
++                while True:
++                    next_offset = offset + size
++                    if next_offset > end:
++                        raise ValueError(
++                            'Dynamic segment size {} is not a multiple of Dyn size {}'.format(
++                                phdr.p_memsz, size))
++                    yield self.read(self.Dyn, offset)
++                    if next_offset == end:
++                        return
++                    offset = next_offset
++
++    def syms(self, shdr: Shdr, resolve: bool=True) -> Sym:
++        """A generator iterating over a symbol table.
++
++        If resolve, symbol names are automatically translated using
++        the string table for the symbol table.
++
++        """
++        assert shdr.sh_type == Sht.SHT_SYMTAB
++        size = shdr.sh_entsize
++        if size != self.Sym.size:
++            raise ValueError('Invalid symbol table entry size {}'.format(size))
++        offset = shdr.sh_offset
++        end = shdr.sh_offset + shdr.sh_size
++        if resolve:
++            strtab = self._find_stringtab(shdr.sh_link)
++        while offset < end:
++            sym = self.read(self.Sym, offset)
++            if resolve:
++                sym = sym.resolve(strtab)
++            yield sym
++            offset += size
++        if offset != end:
++            raise ValueError('Symbol table is not a multiple of entry size')
++
++    def lookup_string(self, strtab_index: int, strtab_offset: int) -> bytes:
++        """Looks up a string in a string table identified by its link index."""
++        try:
++            strtab = self._stringtab[strtab_index]
++        except KeyError:
++            strtab = self._find_stringtab(strtab_index)
++        return strtab.get(strtab_offset)
++
++    def find_section(self, shndx: Shn) -> Shdr:
++        """Returns the section header for the indexed section.
++
++        The section name is not resolved.
++        """
++        try:
++            return self._section[shndx]
++        except KeyError:
++            pass
++        if shndx in Shn:
++            raise ValueError('Reserved section index {}'.format(shndx))
++        idx = shndx.value
++        if idx < 0 or idx >= self._shdr_num:
++            raise ValueError('Section index {} out of range [0, {})'.format(
++                idx, self._shdr_num))
++        shdr = self.read(
++            self.Shdr, self.ehdr.e_shoff + idx * self.Shdr.size)
++        self._section[shndx] = shdr
++        return shdr
++
++    def _find_stringtab(self, sh_link: int) -> StringTable:
++        if sh_link in self._stringtab:
++            return self._stringtab[sh_link]
++        if sh_link < 0 or sh_link >= self._shdr_num:
++            raise ValueError('Section index {} out of range [0, {})'.format(
++                sh_link, self._shdr_num))
++        shdr = self.read(
++            self.Shdr, self.ehdr.e_shoff + sh_link * self.Shdr.size)
++        if shdr.sh_type != Sht.SHT_STRTAB:
++            raise ValueError(
++                'Section {} is not a string table: {}'.format(
++                    sh_link, shdr.sh_type))
++        strtab = StringTable(
++            self.image[shdr.sh_offset:shdr.sh_offset + shdr.sh_size])
++        # This could retain essentially arbitrary amounts of data,
++        # but caching string tables seems important for performance.
++        self._stringtab[sh_link] = strtab
++        return strtab
++
++
++__all__ = [name for name in dir() if name[0].isupper()]
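++
++# End-to-end usage sketch (editor's addition; the path below is a
++# hypothetical example and the snippet is not executed here):
++#
++#     img = Image.readfile('/usr/lib64/libc.so.6')
++#     for phdr in img.phdrs():
++#         if phdr.p_type == Pt.PT_LOAD:
++#             print(hex(phdr.p_vaddr), hex(phdr.p_memsz))
++#     needed = [dyn.d_val for dyn in img.dynamic()
++#               if dyn.d_tag == Dt.DT_NEEDED]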
diff --git a/SOURCES/glibc-rh2109510-11.patch b/SOURCES/glibc-rh2109510-11.patch
new file mode 100644
index 0000000..c7e08fc
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-11.patch
@@ -0,0 +1,409 @@
+commit 198abcbb94618730dae1b3f4393efaa49e0ec8c7
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Apr 11 11:30:31 2022 +0200
+
+    Default to --with-default-link=no (bug 25812)
+
+    This is necessary to place the libio vtables into the RELRO segment.
+    New tests elf/tst-relro-ldso and elf/tst-relro-libc are added to
+    verify that this is what actually happens.
+
+    The new tests fail on ia64 due to lack of (default) RELRO support
+    in binutils, so they are XFAILed there.
+
+Conflicts:
+	elf/Makefile
+	  (missing valgrind smoke test)
+
+diff --git a/INSTALL b/INSTALL
+index b3a4370f592c5047..b69672b283c0b774 100644
+--- a/INSTALL
++++ b/INSTALL
+@@ -90,6 +90,12 @@ if 'CFLAGS' is specified it must enable optimization.  For example:
+      library will still be usable, but functionality may be lost--for
+      example, you can't build a shared libc with old binutils.
+ 
++'--with-default-link=FLAG'
++     With '--with-default-link=yes', the build system does not use a
++     custom linker script for linking shared objects.  The default for
++     FLAG is the opposite, 'no', because the custom linker script is
++     needed for full RELRO protection.
++
+ '--with-nonshared-cflags=CFLAGS'
+      Use additional compiler flags CFLAGS to build the parts of the
+      library which are always statically linked into applications and
+diff --git a/configure b/configure
+index 8b3681d2e28310c8..c794cea4359b3da3 100755
+--- a/configure
++++ b/configure
+@@ -3339,7 +3339,7 @@ fi
+ if test "${with_default_link+set}" = set; then :
+   withval=$with_default_link; use_default_link=$withval
+ else
+-  use_default_link=default
++  use_default_link=no
+ fi
+ 
+ 
+@@ -5965,69 +5965,6 @@ fi
+ $as_echo "$libc_cv_hashstyle" >&6; }
+ 
+ 
+-# The linker's default -shared behavior is good enough if it
+-# does these things that our custom linker scripts ensure that
+-# all allocated NOTE sections come first.
+-if test "$use_default_link" = default; then
+-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sufficient default -shared layout" >&5
+-$as_echo_n "checking for sufficient default -shared layout... " >&6; }
+-if ${libc_cv_use_default_link+:} false; then :
+-  $as_echo_n "(cached) " >&6
+-else
+-    libc_cv_use_default_link=no
+-  cat > conftest.s <<\EOF
+-	  .section .note.a,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "foo"
+-	  .section .note.b,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "bar"
+-EOF
+-  if { ac_try='  ${CC-cc} $ASFLAGS -shared -o conftest.so conftest.s 1>&5'
+-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+-  (eval $ac_try) 2>&5
+-  ac_status=$?
+-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; } &&
+-       ac_try=`$READELF -S conftest.so | sed -n \
+-	 '${x;p;}
+-	  s/^ *\[ *[1-9][0-9]*\]  *\([^ ][^ ]*\)  *\([^ ][^ ]*\) .*$/\2 \1/
+-	  t a
+-	  b
+-	  : a
+-	  H'`
+-  then
+-    libc_seen_a=no libc_seen_b=no
+-    set -- $ac_try
+-    while test $# -ge 2 -a "$1" = NOTE; do
+-      case "$2" in
+-      .note.a) libc_seen_a=yes ;;
+-      .note.b) libc_seen_b=yes ;;
+-      esac
+-      shift 2
+-    done
+-    case "$libc_seen_a$libc_seen_b" in
+-    yesyes)
+-      libc_cv_use_default_link=yes
+-      ;;
+-    *)
+-      echo >&5 "\
+-$libc_seen_a$libc_seen_b from:
+-$ac_try"
+-      ;;
+-    esac
+-  fi
+-  rm -f conftest*
+-fi
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_use_default_link" >&5
+-$as_echo "$libc_cv_use_default_link" >&6; }
+-  use_default_link=$libc_cv_use_default_link
+-fi
+-
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOB_DAT reloc" >&5
+ $as_echo_n "checking for GLOB_DAT reloc... " >&6; }
+ if ${libc_cv_has_glob_dat+:} false; then :
+diff --git a/configure.ac b/configure.ac
+index 82d9ab2fb67145bb..52429d82344954b3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -152,7 +152,7 @@ AC_ARG_WITH([default-link],
+ 	    AC_HELP_STRING([--with-default-link],
+ 			   [do not use explicit linker scripts]),
+ 	    [use_default_link=$withval],
+-	    [use_default_link=default])
++	    [use_default_link=no])
+ 
+ dnl Additional build flags injection.
+ AC_ARG_WITH([nonshared-cflags],
+@@ -1352,59 +1352,6 @@ fi
+ rm -f conftest*])
+ AC_SUBST(libc_cv_hashstyle)
+ 
+-# The linker's default -shared behavior is good enough if it
+-# does these things that our custom linker scripts ensure that
+-# all allocated NOTE sections come first.
+-if test "$use_default_link" = default; then
+-  AC_CACHE_CHECK([for sufficient default -shared layout],
+-		  libc_cv_use_default_link, [dnl
+-  libc_cv_use_default_link=no
+-  cat > conftest.s <<\EOF
+-	  .section .note.a,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "foo"
+-	  .section .note.b,"a",%note
+-	  .balign 4
+-	  .long 4,4,9
+-	  .string "GNU"
+-	  .string "bar"
+-EOF
+-  if AC_TRY_COMMAND([dnl
+-  ${CC-cc} $ASFLAGS -shared -o conftest.so conftest.s 1>&AS_MESSAGE_LOG_FD]) &&
+-       ac_try=`$READELF -S conftest.so | sed -n \
+-	 ['${x;p;}
+-	  s/^ *\[ *[1-9][0-9]*\]  *\([^ ][^ ]*\)  *\([^ ][^ ]*\) .*$/\2 \1/
+-	  t a
+-	  b
+-	  : a
+-	  H']`
+-  then
+-    libc_seen_a=no libc_seen_b=no
+-    set -- $ac_try
+-    while test $# -ge 2 -a "$1" = NOTE; do
+-      case "$2" in
+-      .note.a) libc_seen_a=yes ;;
+-      .note.b) libc_seen_b=yes ;;
+-      esac
+-      shift 2
+-    done
+-    case "$libc_seen_a$libc_seen_b" in
+-    yesyes)
+-      libc_cv_use_default_link=yes
+-      ;;
+-    *)
+-      echo >&AS_MESSAGE_LOG_FD "\
+-$libc_seen_a$libc_seen_b from:
+-$ac_try"
+-      ;;
+-    esac
+-  fi
+-  rm -f conftest*])
+-  use_default_link=$libc_cv_use_default_link
+-fi
+-
+ AC_CACHE_CHECK(for GLOB_DAT reloc,
+ 	       libc_cv_has_glob_dat, [dnl
+ cat > conftest.c <<EOF
+diff --git a/elf/Makefile b/elf/Makefile
+index 89ce4f5196e5eb39..1fdf40cbd49e233e 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -477,6 +477,40 @@ tests-execstack-yes = \
+   # tests-execstack-yes
+ endif
+ endif
++
++tests-special += $(objpfx)tst-relro-ldso.out $(objpfx)tst-relro-libc.out
++$(objpfx)tst-relro-ldso.out: tst-relro-symbols.py $(..)/scripts/glibcelf.py \
++  $(objpfx)ld.so
++	$(PYTHON) tst-relro-symbols.py $(objpfx)ld.so \
++	  --required=_rtld_global_ro \
++	  > $@ 2>&1; $(evaluate-test)
++# The optional symbols are present in libc only if the architecture has
++# the GLIBC_2.0 symbol set in libc.
++$(objpfx)tst-relro-libc.out: tst-relro-symbols.py $(..)/scripts/glibcelf.py \
++  $(common-objpfx)libc.so
++	$(PYTHON) tst-relro-symbols.py $(common-objpfx)libc.so \
++	    --required=_IO_cookie_jumps \
++	    --required=_IO_file_jumps \
++	    --required=_IO_file_jumps_maybe_mmap \
++	    --required=_IO_file_jumps_mmap \
++	    --required=_IO_helper_jumps \
++	    --required=_IO_mem_jumps \
++	    --required=_IO_obstack_jumps \
++	    --required=_IO_proc_jumps \
++	    --required=_IO_str_chk_jumps \
++	    --required=_IO_str_jumps \
++	    --required=_IO_strn_jumps \
++	    --required=_IO_wfile_jumps \
++	    --required=_IO_wfile_jumps_maybe_mmap \
++	    --required=_IO_wfile_jumps_mmap \
++	    --required=_IO_wmem_jumps \
++	    --required=_IO_wstr_jumps \
++	    --required=_IO_wstrn_jumps \
++	    --optional=_IO_old_cookie_jumps \
++	    --optional=_IO_old_file_jumps \
++	    --optional=_IO_old_proc_jumps \
++	  > $@ 2>&1; $(evaluate-test)
++
+ tests += $(tests-execstack-$(have-z-execstack))
+ ifeq ($(run-built-tests),yes)
+ tests-special += \
+diff --git a/elf/tst-relro-symbols.py b/elf/tst-relro-symbols.py
+new file mode 100644
+index 0000000000000000..368ea3349f86bd81
+--- /dev/null
++++ b/elf/tst-relro-symbols.py
+@@ -0,0 +1,137 @@
++#!/usr/bin/python3
++# Verify that certain symbols are covered by RELRO.
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""Analyze a (shared) object to verify that certain symbols are
++present and covered by the PT_GNU_RELRO segment.
++
++"""
++
++import argparse
++import os.path
++import sys
++
++# Make available glibc Python modules.
++sys.path.append(os.path.join(
++    os.path.dirname(os.path.realpath(__file__)), os.path.pardir, 'scripts'))
++
++import glibcelf
++
++def find_relro(path: str, img: glibcelf.Image) -> (int, int):
++    """Discover the address range of the PT_GNU_RELRO segment."""
++    for phdr in img.phdrs():
++        if phdr.p_type == glibcelf.Pt.PT_GNU_RELRO:
++            # The computation is not entirely accurate because
++            # _dl_protect_relro in elf/dl-reloc.c rounds both the
++            # start and the end downwards using the run-time page size.
++            return phdr.p_vaddr, phdr.p_vaddr + phdr.p_memsz
++    sys.stdout.write('{}: error: no PT_GNU_RELRO segment\n'.format(path))
++    sys.exit(1)
++
++def check_in_relro(kind, relro_begin, relro_end, name, start, size, error):
++    """Check if a section or symbol falls within in the RELRO segment."""
++    end = start + size - 1
++    if not (relro_begin <= start < end < relro_end):
++        error(
++            '{} {!r} of size {} at 0x{:x} is not in RELRO range [0x{:x}, 0x{:x})'.format(
++                kind, name.decode('UTF-8'), size, start,
++                relro_begin, relro_end))
++
++def get_parser():
++    """Return an argument parser for this script."""
++    parser = argparse.ArgumentParser(description=__doc__)
++    parser.add_argument('object', help='path to object file to check')
++    parser.add_argument('--required', metavar='NAME', default=(),
++                        help='required symbol names', nargs='*')
++    parser.add_argument('--optional', metavar='NAME', default=(),
++                        help='optional symbol names', nargs='*')
++    return parser
++
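++# Invocation sketch (editor's note; the ld.so path is hypothetical,
++# the options mirror the elf/Makefile rule above):
++#
++#     python3 tst-relro-symbols.py elf/ld.so --required=_rtld_global_ro
++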
++def main(argv):
++    """The main entry point."""
++    parser = get_parser()
++    opts = parser.parse_args(argv)
++    img = glibcelf.Image.readfile(opts.object)
++
++    required_symbols = frozenset([sym.encode('UTF-8')
++                                  for sym in opts.required])
++    optional_symbols = frozenset([sym.encode('UTF-8')
++                                  for sym in opts.optional])
++    check_symbols = required_symbols | optional_symbols
++
++    # Tracks the symbols in check_symbols that have been found.
++    symbols_found = set()
++
++    # Discover the extent of the RELRO segment.
++    relro_begin, relro_end = find_relro(opts.object, img)
++    symbol_table_found = False
++
++    errors = False
++    def error(msg: str) -> None:
++        """Record an error condition and write a message to standard output."""
++        nonlocal errors
++        errors = True
++        sys.stdout.write('{}: error: {}\n'.format(opts.object, msg))
++
++    # Iterate over section headers to find the symbol table.
++    for shdr in img.shdrs():
++        if shdr.sh_type == glibcelf.Sht.SHT_SYMTAB:
++            symbol_table_found = True
++            for sym in img.syms(shdr):
++                if sym.st_name in check_symbols:
++                    symbols_found.add(sym.st_name)
++
++                    # Validate symbol type, section, and size.
++                    if sym.st_info.type != glibcelf.Stt.STT_OBJECT:
++                        error('symbol {!r} has wrong type {}'.format(
++                            sym.st_name.decode('UTF-8'), sym.st_info.type))
++                    if sym.st_shndx in glibcelf.Shn:
++                        error('symbol {!r} has reserved section {}'.format(
++                            sym.st_name.decode('UTF-8'), sym.st_shndx))
++                        continue
++                    if sym.st_size == 0:
++                        error('symbol {!r} has size zero'.format(
++                            sym.st_name.decode('UTF-8')))
++                        continue
++
++                    check_in_relro('symbol', relro_begin, relro_end,
++                                   sym.st_name, sym.st_value, sym.st_size,
++                                   error)
++            continue # SHT_SYMTAB
++        if shdr.sh_name == b'.data.rel.ro' \
++           or shdr.sh_name.startswith(b'.data.rel.ro.'):
++            check_in_relro('section', relro_begin, relro_end,
++                           shdr.sh_name, shdr.sh_addr, shdr.sh_size,
++                           error)
++            continue
++
++    if required_symbols - symbols_found:
++        for sym in sorted(required_symbols - symbols_found):
++            error('symbol {!r} not found'.format(sym.decode('UTF-8')))
++
++    if errors:
++        sys.exit(1)
++
++    if not symbol_table_found:
++        sys.stdout.write(
++            '{}: warning: no symbol table found (stripped object)\n'.format(
++                opts.object))
++        sys.exit(77)
++
++if __name__ == '__main__':
++    main(sys.argv[1:])
+diff --git a/manual/install.texi b/manual/install.texi
+index c262fd56d0cef67b..a2c43bd692de7825 100644
+--- a/manual/install.texi
++++ b/manual/install.texi
+@@ -117,6 +117,12 @@ problem and suppress these constructs, so that the library will still be
+ usable, but functionality may be lost---for example, you can't build a
+ shared libc with old binutils.
+ 
++@item --with-default-link=@var{FLAG}
++With @code{--with-default-link=yes}, the build system does not use a
++custom linker script for linking shared objects.  The default for
++@var{FLAG} is the opposite, @samp{no}, because the custom linker script
++is needed for full RELRO protection.
++
+ @item --with-nonshared-cflags=@var{cflags}
+ Use additional compiler flags @var{cflags} to build the parts of the
+ library which are always statically linked into applications and
+diff --git a/sysdeps/unix/sysv/linux/ia64/Makefile b/sysdeps/unix/sysv/linux/ia64/Makefile
+index 97fc7df0b122d6a0..b1ad1ab7b1efa34c 100644
+--- a/sysdeps/unix/sysv/linux/ia64/Makefile
++++ b/sysdeps/unix/sysv/linux/ia64/Makefile
+@@ -1,3 +1,9 @@
++ifeq ($(subdir),elf)
++# ia64 does not support PT_GNU_RELRO.
++test-xfail-tst-relro-ldso = yes
++test-xfail-tst-relro-libc = yes
++endif
++
+ ifeq ($(subdir),misc)
+ sysdep_headers += sys/rse.h
+ endif
diff --git a/SOURCES/glibc-rh2109510-12.patch b/SOURCES/glibc-rh2109510-12.patch
new file mode 100644
index 0000000..a580b1b
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-12.patch
@@ -0,0 +1,26 @@
+commit b571f3adffdcbed23f35ea39b0ca43809dbb4f5b
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Fri Apr 22 19:34:52 2022 +0200
+
+    scripts/glibcelf.py: Mark as UNSUPPORTED on Python 3.5 and earlier
+    
+    enum.IntFlag and enum.EnumMeta._missing_ support are not part of
+    earlier Python versions.
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 8f7d0ca184845714..da0d5380f33a195e 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -28,6 +28,12 @@ import collections
+ import enum
+ import struct
+ 
++if not hasattr(enum, 'IntFlag'):
++    import sys
++    sys.stdout.write(
++        'warning: glibcelf.py needs Python 3.6 for enum support\n')
++    sys.exit(77)
++
+ class _OpenIntEnum(enum.IntEnum):
+     """Integer enumeration that supports arbitrary int values."""
+     @classmethod
diff --git a/SOURCES/glibc-rh2109510-13.patch b/SOURCES/glibc-rh2109510-13.patch
new file mode 100644
index 0000000..8589a81
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-13.patch
@@ -0,0 +1,30 @@
+Partial backport of the scripts/glibcelf.py part of:
+
+commit 4610b24f5e4e6d2c4b769594efa6d460943163bb
+Author: H.J. Lu <hjl.tools@gmail.com>
+Date:   Tue Mar 29 14:08:54 2022 -0700
+
+    elf: Define DT_RELR related macros and types
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index da0d5380f33a195e..f847b36c55c15b8a 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -304,6 +304,7 @@ class Sht(_OpenIntEnum):
+     SHT_PREINIT_ARRAY = 16
+     SHT_GROUP = 17
+     SHT_SYMTAB_SHNDX = 18
++    SHT_RELR = 19
+     SHT_GNU_ATTRIBUTES = 0x6ffffff5
+     SHT_GNU_HASH = 0x6ffffff6
+     SHT_GNU_LIBLIST = 0x6ffffff7
+@@ -593,6 +594,9 @@ class Dt(_OpenIntEnum):
+     DT_PREINIT_ARRAY = 32
+     DT_PREINIT_ARRAYSZ = 33
+     DT_SYMTAB_SHNDX = 34
++    DT_RELRSZ = 35
++    DT_RELR = 36
++    DT_RELRENT = 37
+     DT_GNU_PRELINKED = 0x6ffffdf5
+     DT_GNU_CONFLICTSZ = 0x6ffffdf6
+     DT_GNU_LIBLISTSZ = 0x6ffffdf7
diff --git a/SOURCES/glibc-rh2109510-14.patch b/SOURCES/glibc-rh2109510-14.patch
new file mode 100644
index 0000000..9448450
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-14.patch
@@ -0,0 +1,50 @@
+commit d055481ce39d03652ac60de5078889e15b6917ff
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon May 16 21:59:24 2022 +0200
+
+    scripts/glibcelf.py: Add *T_RISCV_* constants
+    
+    SHT_RISCV_ATTRIBUTES, PT_RISCV_ATTRIBUTES, DT_RISCV_VARIANT_CC were
+    added in commit 0b6c6750732483b4d59c2fcb45484079cd84157d
+    ("Update RISC-V specific ELF definitions").  This caused the
+    elf/tst-glibcelf consistency check to fail.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index f847b36c55c15b8a..07bef940433b4c99 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -385,6 +385,10 @@ class ShtPARISC(enum.Enum):
+     SHT_PARISC_UNWIND = 0x70000001
+     SHT_PARISC_DOC = 0x70000002
+ 
++class ShtRISCV(enum.Enum):
++    """Supplemental SHT_* constants for EM_RISCV."""
++    SHT_RISCV_ATTRIBUTES = 0x70000003
++
+ class Pf(enum.IntFlag):
+     """Program header flags.  Type of Phdr.p_flags values."""
+     PF_X = 1
+@@ -558,6 +562,10 @@ class PtPARISC(enum.Enum):
+     PT_PARISC_ARCHEXT = 0x70000000
+     PT_PARISC_UNWIND = 0x70000001
+ 
++class PtRISCV(enum.Enum):
++    """Supplemental PT_* constants for EM_RISCV."""
++    PT_RISCV_ATTRIBUTES = 0x70000003
++
+ class Dt(_OpenIntEnum):
+     """ELF dynamic segment tags.  Type of Dyn.d_val."""
+     DT_NULL = 0
+@@ -710,6 +718,10 @@ class DtPPC64(enum.Enum):
+     DT_PPC64_OPDSZ = 0x70000002
+     DT_PPC64_OPT = 0x70000003
+ 
++class DtRISCV(enum.Enum):
++    """Supplemental DT_* constants for EM_RISCV."""
++    DT_RISCV_VARIANT_CC = 0x70000001
++
+ class DtSPARC(enum.Enum):
+     """Supplemental DT_* constants for EM_SPARC."""
+     DT_SPARC_REGISTER = 0x70000001
diff --git a/SOURCES/glibc-rh2109510-15.patch b/SOURCES/glibc-rh2109510-15.patch
new file mode 100644
index 0000000..7979be8
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-15.patch
@@ -0,0 +1,26 @@
+commit 8521001731d6539382fa875f1cac9864c466ef27
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Mon Jun 6 14:41:24 2022 -0300
+
+    scripts/glibcelf.py: Add PT_AARCH64_MEMTAG_MTE constant
+    
+    It was added in commit 603e5c8ba7257483c162cabb06eb6f79096429b6.
+    This caused the elf/tst-glibcelf consistency check to fail.
+    
+    Reviewed-by: Florian Weimer <fweimer@redhat.com>
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 07bef940433b4c99..47f95d07baefb4ae 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -523,6 +523,10 @@ class Pt(_OpenIntEnum):
+     PT_SUNWBSS = 0x6ffffffa
+     PT_SUNWSTACK = 0x6ffffffb
+ 
++class PtAARCH64(enum.Enum):
++    """Supplemental PT_* constants for EM_AARCH64."""
++    PT_AARCH64_MEMTAG_MTE = 0x70000002
++
+ class PtARM(enum.Enum):
+     """Supplemental PT_* constants for EM_ARM."""
+     PT_ARM_EXIDX = 0x70000001
diff --git a/SOURCES/glibc-rh2109510-16.patch b/SOURCES/glibc-rh2109510-16.patch
new file mode 100644
index 0000000..38416a0
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-16.patch
@@ -0,0 +1,22 @@
+Partial backport of the scripts/glibcelf.py part of:
+
+commit 2d83247d90c9f0bfee7f3f2505bc1b13b6f36c04
+Author: caiyinyu <caiyinyu@loongson.cn>
+Date:   Tue Jul 19 09:20:45 2022 +0800
+
+    LoongArch: Add relocations and ELF flags to elf.h and scripts/glibcelf.py
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 47f95d07baefb4ae..de0509130ed9ad47 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -252,7 +252,8 @@ class Machine(_OpenIntEnum):
+     EM_RISCV = 243
+     EM_BPF = 247
+     EM_CSKY = 252
+-    EM_NUM = 253
++    EM_LOONGARCH = 258
++    EM_NUM = 259
+     EM_ALPHA = 0x9026
+ 
+ class Et(_OpenIntEnum):
diff --git a/SOURCES/glibc-rh2109510-17.patch b/SOURCES/glibc-rh2109510-17.patch
new file mode 100644
index 0000000..a7e5a3a
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-17.patch
@@ -0,0 +1,78 @@
+commit bd13cb19f5e15e9e9a92a536e755fd93a97a67f6
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Fri Aug 19 11:16:32 2022 +0200
+
+    scripts/glibcelf.py: Add hashing support
+    
+    ELF and GNU hashes can now be computed using the elf_hash and
+    gnu_hash functions.
+    
+    Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+    Tested-by: Carlos O'Donell <carlos@redhat.com>
+
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index bf15a3bad4479e08..e5026e2289df206b 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -240,6 +240,24 @@ def check_constant_values(cc):
+             error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
+                 name, glibcelf_value, elf_h_value))
+ 
++def check_hashes():
++    for name, expected_elf, expected_gnu in (
++            ('', 0, 0x1505),
++            ('PPPPPPPPPPPP', 0, 0x9f105c45),
++            ('GLIBC_2.0', 0xd696910, 0xf66c3dd5),
++            ('GLIBC_2.34', 0x69691b4, 0xc3f3f90c),
++            ('GLIBC_PRIVATE', 0x963cf85, 0x692a260)):
++        for convert in (lambda x: x, lambda x: x.encode('UTF-8')):
++            name = convert(name)
++            actual_elf = glibcelf.elf_hash(name)
++            if actual_elf != expected_elf:
++                error('elf_hash({!r}): {:x} != 0x{:x}'.format(
++                    name, actual_elf, expected_elf))
++            actual_gnu = glibcelf.gnu_hash(name)
++            if actual_gnu != expected_gnu:
++                error('gnu_hash({!r}): {:x} != 0x{:x}'.format(
++                    name, actual_gnu, expected_gnu))
++
+ def main():
+     """The main entry point."""
+     parser = argparse.ArgumentParser(
+@@ -251,6 +269,7 @@ def main():
+     check_duplicates()
+     check_constant_prefixes()
+     check_constant_values(cc=args.cc)
++    check_hashes()
+ 
+     if errors_encountered > 0:
+         print("note: errors encountered:", errors_encountered)
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index de0509130ed9ad47..5c8f46f590722384 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -1158,5 +1158,24 @@ class Image:
+         self._stringtab[sh_link] = strtab
+         return strtab
+ 
++def elf_hash(s):
++    """Computes the ELF hash of the string."""
++    acc = 0
++    for ch in s:
++        if type(ch) is not int:
++            ch = ord(ch)
++        acc = ((acc << 4) + ch) & 0xffffffff
++        top = acc & 0xf0000000
++        acc = (acc ^ (top >> 24)) & ~top
++    return acc
++
++def gnu_hash(s):
++    """Computes the GNU hash of the string."""
++    h = 5381
++    for ch in s:
++        if type(ch) is not int:
++            ch = ord(ch)
++        h = (h * 33 + ch) & 0xffffffff
++    return h
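++
++# Worked example (editor's sketch, not part of the upstream change);
++# the values match the expectations in elf/tst-glibcelf.py above:
++#
++#     assert elf_hash('') == 0
++#     assert gnu_hash('') == 0x1505             # the DJB seed 5381
++#     assert elf_hash('GLIBC_2.0') == 0xd696910
++#     assert gnu_hash('GLIBC_2.0') == 0xf66c3dd5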
+ 
+ __all__ = [name for name in dir() if name[0].isupper()]
diff --git a/SOURCES/glibc-rh2109510-18.patch b/SOURCES/glibc-rh2109510-18.patch
new file mode 100644
index 0000000..83172fa
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-18.patch
@@ -0,0 +1,439 @@
+commit f40c7887d3cc9bb0b56576ed9edbe505ff8058c0
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    scripts: Extract glibcpp.py from check-obsolete-constructs.py
+    
+    The C tokenizer is useful separately.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
+index 89d21dea6e788783..7c7a092e440a3258 100755
+--- a/scripts/check-obsolete-constructs.py
++++ b/scripts/check-obsolete-constructs.py
+@@ -24,193 +24,14 @@
+ """
+ 
+ import argparse
+-import collections
++import os
+ import re
+ import sys
+ 
+-# Simplified lexical analyzer for C preprocessing tokens.
+-# Does not implement trigraphs.
+-# Does not implement backslash-newline in the middle of any lexical
+-#   item other than a string literal.
+-# Does not implement universal-character-names in identifiers.
+-# Treats prefixed strings (e.g. L"...") as two tokens (L and "...")
+-# Accepts non-ASCII characters only within comments and strings.
+-
+-# Caution: The order of the outermost alternation matters.
+-# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+-# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
+-# be last.
+-# Caution: There should be no capturing groups other than the named
+-# captures in the outermost alternation.
+-
+-# For reference, these are all of the C punctuators as of C11:
+-#   [ ] ( ) { } , ; ? ~
+-#   ! != * *= / /= ^ ^= = ==
+-#   # ##
+-#   % %= %> %: %:%:
+-#   & &= &&
+-#   | |= ||
+-#   + += ++
+-#   - -= -- ->
+-#   . ...
+-#   : :>
+-#   < <% <: << <<= <=
+-#   > >= >> >>=
+-
+-# The BAD_* tokens are not part of the official definition of pp-tokens;
+-# they match unclosed strings, character constants, and block comments,
+-# so that the regex engine doesn't have to backtrack all the way to the
+-# beginning of a broken construct and then emit dozens of junk tokens.
+-
+-PP_TOKEN_RE_ = re.compile(r"""
+-    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
+-   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
+-   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
+-   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
+-   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
+-   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
+-   |(?P<LINE_COMMENT>  //[^\r\n]*)
+-   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
+-   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
+-   |(?P<PUNCTUATOR>
+-       [,;?~(){}\[\]]
+-     | [!*/^=]=?
+-     | \#\#?
+-     | %(?:[=>]|:(?:%:)?)?
+-     | &[=&]?
+-     |\|[=|]?
+-     |\+[=+]?
+-     | -[=->]?
+-     |\.(?:\.\.)?
+-     | :>?
+-     | <(?:[%:]|<(?:=|<=?)?)?
+-     | >(?:=|>=?)?)
+-   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
+-   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
+-   |(?P<OTHER>         .)
+-""", re.DOTALL | re.VERBOSE)
+-
+-HEADER_NAME_RE_ = re.compile(r"""
+-    < [^>\r\n]+ >
+-  | " [^"\r\n]+ "
+-""", re.DOTALL | re.VERBOSE)
+-
+-ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
+-
+-# based on the sample code in the Python re documentation
+-Token_ = collections.namedtuple("Token", (
+-    "kind", "text", "line", "column", "context"))
+-Token_.__doc__ = """
+-   One C preprocessing token, comment, or chunk of whitespace.
+-   'kind' identifies the token type, which will be one of:
+-       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
+-       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
+-       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
+-       handled within tokenize_c, below.
+-
+-   'text' is the sequence of source characters making up the token;
+-       no decoding whatsoever is performed.
+-
+-   'line' and 'column' give the position of the first character of the
+-      token within the source file.  They are both 1-based.
+-
+-   'context' indicates whether or not this token occurred within a
+-      preprocessing directive; it will be None for running text,
+-      '<null>' for the leading '#' of a directive line (because '#'
+-      all by itself on a line is a "null directive"), or the name of
+-      the directive for tokens within a directive line, starting with
+-      the IDENT for the name itself.
+-"""
+-
+-def tokenize_c(file_contents, reporter):
+-    """Yield a series of Token objects, one for each preprocessing
+-       token, comment, or chunk of whitespace within FILE_CONTENTS.
+-       The REPORTER object is expected to have one method,
+-       reporter.error(token, message), which will be called to
+-       indicate a lexical error at the position of TOKEN.
+-       If MESSAGE contains the four-character sequence '{!r}', that
+-       is expected to be replaced by repr(token.text).
+-    """
++# Make available glibc Python modules.
++sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+ 
+-    Token = Token_
+-    PP_TOKEN_RE = PP_TOKEN_RE_
+-    ENDLINE_RE = ENDLINE_RE_
+-    HEADER_NAME_RE = HEADER_NAME_RE_
+-
+-    line_num = 1
+-    line_start = 0
+-    pos = 0
+-    limit = len(file_contents)
+-    directive = None
+-    at_bol = True
+-    while pos < limit:
+-        if directive == "include":
+-            mo = HEADER_NAME_RE.match(file_contents, pos)
+-            if mo:
+-                kind = "HEADER_NAME"
+-                directive = "after_include"
+-            else:
+-                mo = PP_TOKEN_RE.match(file_contents, pos)
+-                kind = mo.lastgroup
+-                if kind != "WHITESPACE":
+-                    directive = "after_include"
+-        else:
+-            mo = PP_TOKEN_RE.match(file_contents, pos)
+-            kind = mo.lastgroup
+-
+-        text = mo.group()
+-        line = line_num
+-        column = mo.start() - line_start
+-        adj_line_start = 0
+-        # only these kinds can contain a newline
+-        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
+-                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
+-            for tmo in ENDLINE_RE.finditer(text):
+-                line_num += 1
+-                adj_line_start = tmo.end()
+-            if adj_line_start:
+-                line_start = mo.start() + adj_line_start
+-
+-        # Track whether or not we are scanning a preprocessing directive.
+-        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
+-            at_bol = True
+-            directive = None
+-        else:
+-            if kind == "PUNCTUATOR" and text == "#" and at_bol:
+-                directive = "<null>"
+-            elif kind == "IDENT" and directive == "<null>":
+-                directive = text
+-            at_bol = False
+-
+-        # Report ill-formed tokens and rewrite them as their well-formed
+-        # equivalents, so downstream processing doesn't have to know about them.
+-        # (Rewriting instead of discarding provides better error recovery.)
+-        if kind == "BAD_BLOCK_COM":
+-            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
+-                           "unclosed block comment")
+-            text += "*/"
+-            kind = "BLOCK_COMMENT"
+-        elif kind == "BAD_STRING":
+-            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
+-                           "unclosed string")
+-            text += "\""
+-            kind = "STRING"
+-        elif kind == "BAD_CHARCONST":
+-            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
+-                           "unclosed char constant")
+-            text += "'"
+-            kind = "CHARCONST"
+-
+-        tok = Token(kind, text, line, column+1,
+-                    "include" if directive == "after_include" else directive)
+-        # Do not complain about OTHER tokens inside macro definitions.
+-        # $ and @ appear in macros defined by headers intended to be
+-        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
+-        if kind == "OTHER" and directive != "define":
+-            self.error(tok, "stray {!r} in program")
+-
+-        yield tok
+-        pos = mo.end()
++import glibcpp
+ 
+ #
+ # Base and generic classes for individual checks.
+@@ -446,7 +267,7 @@ class HeaderChecker:
+ 
+         typedef_checker = ObsoleteTypedefChecker(self, self.fname)
+ 
+-        for tok in tokenize_c(contents, self):
++        for tok in glibcpp.tokenize_c(contents, self):
+             typedef_checker.examine(tok)
+ 
+ def main():
+diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
+new file mode 100644
+index 0000000000000000..b44c6a4392dde8ce
+--- /dev/null
++++ b/scripts/glibcpp.py
+@@ -0,0 +1,212 @@
++#! /usr/bin/python3
++# Approximation to C preprocessing.
++# Copyright (C) 2019-2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++"""
++Simplified lexical analyzer for C preprocessing tokens.
++
++Does not implement trigraphs.
++
++Does not implement backslash-newline in the middle of any lexical
++item other than a string literal.
++
++Does not implement universal-character-names in identifiers.
++
++Treats prefixed strings (e.g. L"...") as two tokens (L and "...").
++
++Accepts non-ASCII characters only within comments and strings.
++"""
++
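++# Usage sketch (editor's illustration; the Reporter class is a
++# hypothetical stand-in for the caller's error handler):
++#
++#     class Reporter:
++#         def error(self, token, message):
++#             print('error:', message.format(token.text))
++#
++#     for tok in tokenize_c('#include <stdio.h>\n', Reporter()):
++#         print(tok.kind, tok.text)
++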
++import collections
++import re
++
++# Caution: The order of the outermost alternation matters.
++# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
++# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
++# be last.
++# Caution: There should be no capturing groups other than the named
++# captures in the outermost alternation.
++
++# For reference, these are all of the C punctuators as of C11:
++#   [ ] ( ) { } , ; ? ~
++#   ! != * *= / /= ^ ^= = ==
++#   # ##
++#   % %= %> %: %:%:
++#   & &= &&
++#   | |= ||
++#   + += ++
++#   - -= -- ->
++#   . ...
++#   : :>
++#   < <% <: << <<= <=
++#   > >= >> >>=
++
++# The BAD_* tokens are not part of the official definition of pp-tokens;
++# they match unclosed strings, character constants, and block comments,
++# so that the regex engine doesn't have to backtrack all the way to the
++# beginning of a broken construct and then emit dozens of junk tokens.
++
++PP_TOKEN_RE_ = re.compile(r"""
++    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
++   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
++   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
++   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
++   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
++   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
++   |(?P<LINE_COMMENT>  //[^\r\n]*)
++   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
++   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
++   |(?P<PUNCTUATOR>
++       [,;?~(){}\[\]]
++     | [!*/^=]=?
++     | \#\#?
++     | %(?:[=>]|:(?:%:)?)?
++     | &[=&]?
++     |\|[=|]?
++     |\+[=+]?
++     | -[=->]?
++     |\.(?:\.\.)?
++     | :>?
++     | <(?:[%:]|<(?:=|<=?)?)?
++     | >(?:=|>=?)?)
++   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
++   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
++   |(?P<OTHER>         .)
++""", re.DOTALL | re.VERBOSE)
++
++HEADER_NAME_RE_ = re.compile(r"""
++    < [^>\r\n]+ >
++  | " [^"\r\n]+ "
++""", re.DOTALL | re.VERBOSE)
++
++ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
++
++# based on the sample code in the Python re documentation
++Token_ = collections.namedtuple("Token", (
++    "kind", "text", "line", "column", "context"))
++Token_.__doc__ = """
++   One C preprocessing token, comment, or chunk of whitespace.
++   'kind' identifies the token type, which will be one of:
++       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
++       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
++       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
++       handled within tokenize_c, below.
++
++   'text' is the sequence of source characters making up the token;
++       no decoding whatsoever is performed.
++
++   'line' and 'column' give the position of the first character of the
++      token within the source file.  They are both 1-based.
++
++   'context' indicates whether or not this token occurred within a
++      preprocessing directive; it will be None for running text,
++      '<null>' for the leading '#' of a directive line (because '#'
++      all by itself on a line is a "null directive"), or the name of
++      the directive for tokens within a directive line, starting with
++      the IDENT for the name itself.
++"""
++
++def tokenize_c(file_contents, reporter):
++    """Yield a series of Token objects, one for each preprocessing
++       token, comment, or chunk of whitespace within FILE_CONTENTS.
++       The REPORTER object is expected to have one method,
++       reporter.error(token, message), which will be called to
++       indicate a lexical error at the position of TOKEN.
++       If MESSAGE contains the four-character sequence '{!r}', that
++       is expected to be replaced by repr(token.text).
++    """
++
++    Token = Token_
++    PP_TOKEN_RE = PP_TOKEN_RE_
++    ENDLINE_RE = ENDLINE_RE_
++    HEADER_NAME_RE = HEADER_NAME_RE_
++
++    line_num = 1
++    line_start = 0
++    pos = 0
++    limit = len(file_contents)
++    directive = None
++    at_bol = True
++    while pos < limit:
++        if directive == "include":
++            mo = HEADER_NAME_RE.match(file_contents, pos)
++            if mo:
++                kind = "HEADER_NAME"
++                directive = "after_include"
++            else:
++                mo = PP_TOKEN_RE.match(file_contents, pos)
++                kind = mo.lastgroup
++                if kind != "WHITESPACE":
++                    directive = "after_include"
++        else:
++            mo = PP_TOKEN_RE.match(file_contents, pos)
++            kind = mo.lastgroup
++
++        text = mo.group()
++        line = line_num
++        column = mo.start() - line_start
++        adj_line_start = 0
++        # only these kinds can contain a newline
++        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
++                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
++            for tmo in ENDLINE_RE.finditer(text):
++                line_num += 1
++                adj_line_start = tmo.end()
++            if adj_line_start:
++                line_start = mo.start() + adj_line_start
++
++        # Track whether or not we are scanning a preprocessing directive.
++        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
++            at_bol = True
++            directive = None
++        else:
++            if kind == "PUNCTUATOR" and text == "#" and at_bol:
++                directive = "<null>"
++            elif kind == "IDENT" and directive == "<null>":
++                directive = text
++            at_bol = False
++
++        # Report ill-formed tokens and rewrite them as their well-formed
++        # equivalents, so downstream processing doesn't have to know about them.
++        # (Rewriting instead of discarding provides better error recovery.)
++        if kind == "BAD_BLOCK_COM":
++            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
++                           "unclosed block comment")
++            text += "*/"
++            kind = "BLOCK_COMMENT"
++        elif kind == "BAD_STRING":
++            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
++                           "unclosed string")
++            text += "\""
++            kind = "STRING"
++        elif kind == "BAD_CHARCONST":
++            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
++                           "unclosed char constant")
++            text += "'"
++            kind = "CHARCONST"
++
++        tok = Token(kind, text, line, column+1,
++                    "include" if directive == "after_include" else directive)
++        # Do not complain about OTHER tokens inside macro definitions.
++        # $ and @ appear in macros defined by headers intended to be
++        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
++        if kind == "OTHER" and directive != "define":
++            reporter.error(tok, "stray {!r} in program")
++
++        yield tok
++        pos = mo.end()
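A minimal usage sketch for the tokenizer above, assuming scripts/ is on sys.path so that glibcpp can be imported; the reporter is an ad-hoc object providing the error(token, message) method described in the docstring:

    import glibcpp

    class Reporter:
        # Collects lexical errors reported by tokenize_c.
        def __init__(self):
            self.errors = []
        def error(self, token, message):
            self.errors.append((token.line, token.column, message))

    reporter = Reporter()
    source = '#include <stdio.h>\n#define ANSWER 42\nint x = ANSWER;\n'
    for tok in glibcpp.tokenize_c(source, reporter):
        if tok.kind == 'IDENT':
            print(tok.line, tok.column, tok.text, tok.context)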
diff --git a/SOURCES/glibc-rh2109510-19.patch b/SOURCES/glibc-rh2109510-19.patch
new file mode 100644
index 0000000..f77b415
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-19.patch
@@ -0,0 +1,598 @@
+commit e6e6184bed490403811771fa527eb95b4ae53c7c
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    scripts: Enhance glibcpp to do basic macro processing
+
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	support/Makefile
+	  (spurious tests sorting change upstream)
+
+diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
+index b44c6a4392dde8ce..455459a609eab120 100644
+--- a/scripts/glibcpp.py
++++ b/scripts/glibcpp.py
+@@ -33,7 +33,9 @@ Accepts non-ASCII characters only within comments and strings.
+ """
+ 
+ import collections
++import operator
+ import re
++import sys
+ 
+ # Caution: The order of the outermost alternation matters.
+ # STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+@@ -210,3 +212,318 @@ def tokenize_c(file_contents, reporter):
+ 
+         yield tok
+         pos = mo.end()
++
++class MacroDefinition(collections.namedtuple('MacroDefinition',
++                                             'name_token args body error')):
++    """A preprocessor macro definition.
++
++    name_token is the Token_ for the name.
++
++    args is None for a macro that is not function-like.  Otherwise, it
++    is a tuple that contains the macro argument name tokens.
++
++    body is a tuple that contains the tokens that constitute the body
++    of the macro definition (excluding whitespace).
++
++    error is None if no error was detected, or otherwise a problem
++    description associated with this macro definition.
++
++    """
++
++    @property
++    def function(self):
++        """Return true if the macro is function-like."""
++        return self.args is not None
++
++    @property
++    def name(self):
++        """Return the name of the macro being defined."""
++        return self.name_token.text
++
++    @property
++    def line(self):
++        """Return the line number of the macro defintion."""
++        return self.name_token.line
++
++    @property
++    def args_lowered(self):
++        """Return the macro argument list as a list of strings"""
++        if self.function:
++            return [token.text for token in self.args]
++        else:
++            return None
++
++    @property
++    def body_lowered(self):
++        """Return the macro body as a list of strings."""
++        return [token.text for token in self.body]
++
++def macro_definitions(tokens):
++    """A generator for C macro definitions among tokens.
++
++    The generator yields MacroDefinition objects.
++
++    tokens must be iterable, yielding Token_ objects.
++
++    """
++
++    macro_name = None
++    macro_start = False # Set to false after macro name and one token.
++    macro_args = None # Set to a list during the macro argument sequence.
++    in_macro_args = False # True while processing macro identifier-list.
++    error = None
++    body = []
++
++    for token in tokens:
++        if token.context == 'define' and macro_name is None \
++           and token.kind == 'IDENT':
++            # Starting up macro processing.
++            if macro_start:
++                # First identifier is the macro name.
++                macro_name = token
++            else:
++                # Next token is the name.
++                macro_start = True
++            continue
++
++        if macro_name is None:
++            # Drop tokens not in macro definitions.
++            continue
++
++        if token.context != 'define':
++            # End of the macro definition.
++            if in_macro_args and error is None:
++                error = 'macro definition ends in macro argument list'
++            yield MacroDefinition(macro_name, macro_args, tuple(body), error)
++            # No longer in a macro definition.
++            macro_name = None
++            macro_start = False
++            macro_args = None
++            in_macro_args = False
++            error = None
++            body.clear()
++            continue
++
++        if macro_start:
++            # First token after the macro name.
++            macro_start = False
++            if token.kind == 'PUNCTUATOR' and token.text == '(':
++                macro_args = []
++                in_macro_args = True
++            continue
++
++        if in_macro_args:
++            if token.kind == 'IDENT' \
++               or (token.kind == 'PUNCTUATOR' and token.text == '...'):
++                # Macro argument or ... placeholder.
++                macro_args.append(token)
++            if token.kind == 'PUNCTUATOR':
++                if token.text == ')':
++                    macro_args = tuple(macro_args)
++                    in_macro_args = False
++                elif token.text == ',':
++                    pass # Skip.  Not a full syntax check.
++                elif error is None:
++                    error = 'invalid punctuator in macro argument list: ' \
++                        + repr(token.text)
++            elif error is None:
++                error = 'invalid {} token in macro argument list'.format(
++                    token.kind)
++            continue
++
++        if token.kind not in ('WHITESPACE', 'BLOCK_COMMENT'):
++            body.append(token)
++
++    # Emit the macro in case the last line does not end with a newline.
++    if macro_name is not None:
++        if in_macro_args and error is None:
++            error = 'macro definition ends in macro argument list'
++        yield MacroDefinition(macro_name, macro_args, tuple(body), error)
++
++# Used to split UL etc. suffixes from numbers such as 123UL.
++RE_SPLIT_INTEGER_SUFFIX = re.compile(r'([^ullULL]+)([ullULL]*)')
++
++BINARY_OPERATORS = {
++    '+': operator.add,
++    '<<': operator.lshift,
++}
++
++# Use the general-purpose dict type if it is order-preserving.
++if (sys.version_info[0], sys.version_info[1]) <= (3, 6):
++    OrderedDict = collections.OrderedDict
++else:
++    OrderedDict = dict
++
++def macro_eval(macro_defs, reporter):
++    """Compute macro values
++
++    macro_defs is the output from macro_definitions.  reporter is an
++    object that accepts reporter.error(line_number, message) and
++    reporter.note(line_number, message) calls to report errors
++    and error context invocations.
++
++    The returned dict contains the values of macros which are not
++    function-like, pairing their names with their computed values.
++
++    The current implementation is incomplete.  It is deliberately not
++    entirely faithful to C, even in the implemented parts.  It checks
++    that macro replacements follow certain syntactic rules even if
++    they are never evaluated.
++
++    """
++
++    # Unevaluated macro definitions by name.
++    definitions = OrderedDict()
++    for md in macro_defs:
++        if md.name in definitions:
++            reporter.error(md.line, 'macro {} redefined'.format(md.name))
++            reporter.note(definitions[md.name].line,
++                          'location of previous definition')
++        else:
++            definitions[md.name] = md
++
++    # String to value mappings for fully evaluated macros.
++    evaluated = OrderedDict()
++
++    # String to macro definitions during evaluation.  Nice error
++    # reporting relies on deterministic iteration order.
++    stack = OrderedDict()
++
++    def eval_token(current, token):
++        """Evaluate one macro token.
++
++        Integers and strings are returned as such (the latter still
++        quoted).  Identifiers are expanded.
++
++        None indicates an empty expansion or an error.
++
++        """
++
++        if token.kind == 'PP_NUMBER':
++            value = None
++            m = RE_SPLIT_INTEGER_SUFFIX.match(token.text)
++            if m:
++                try:
++                    value = int(m.group(1), 0)
++                except ValueError:
++                    pass
++            if value is None:
++                reporter.error(token.line,
++                    'invalid number {!r} in definition of {}'.format(
++                        token.text, current.name))
++            return value
++
++        if token.kind == 'STRING':
++            return token.text
++
++        if token.kind == 'CHARCONST' and len(token.text) == 3:
++            return ord(token.text[1])
++
++        if token.kind == 'IDENT':
++            name = token.text
++            result = eval1(current, name)
++            if name not in evaluated:
++                evaluated[name] = result
++            return result
++
++        reporter.error(token.line,
++            'unrecognized {!r} in definition of {}'.format(
++                token.text, current.name))
++        return None
++
++
++    def eval1(current, name):
++        """Evaluate one name.
++
++        The name is looked up and the macro definition evaluated
++        recursively if necessary.  The current argument is the macro
++        definition being evaluated.
++
++        None as a return value indicates an error.
++
++        """
++
++        # Fast path if the value has already been evaluated.
++        if name in evaluated:
++            return evaluated[name]
++
++        try:
++            md = definitions[name]
++        except KeyError:
++            reporter.error(current.line,
++                'reference to undefined identifier {} in definition of {}'
++                           .format(name, current.name))
++            return None
++
++        if md.name in stack:
++            # Recursive macro definition.
++            md = stack[name]
++            reporter.error(md.line,
++                'macro definition {} refers to itself'.format(md.name))
++            for md1 in reversed(list(stack.values())):
++                if md1 is md:
++                    break
++                reporter.note(md1.line,
++                              'evaluated from {}'.format(md1.name))
++            return None
++
++        stack[md.name] = md
++        if md.function:
++            reporter.error(current.line,
++                'attempt to evaluate function-like macro {}'.format(name))
++            reporter.note(md.line, 'definition of {}'.format(md.name))
++            return None
++
++        try:
++            body = md.body
++            if len(body) == 0:
++                # Empty expansion.
++                return None
++
++            # Remove surrounding ().
++            if body[0].text == '(' and body[-1].text == ')':
++                body = body[1:-1]
++                had_parens = True
++            else:
++                had_parens = False
++
++            if len(body) == 1:
++                return eval_token(md, body[0])
++
++            # Minimal expression evaluator for binary operators.
++            op = body[1].text
++            if len(body) == 3 and op in BINARY_OPERATORS:
++                if not had_parens:
++                    reporter.error(body[1].line,
++                        'missing parentheses around {} expression'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++
++                left = eval_token(md, body[0])
++                right = eval_token(md, body[2])
++
++                if type(left) != type(1):
++                    reporter.error(left.line,
++                        'left operand of {} is not an integer'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++                if type(right) != type(1):
++                    reporter.error(left.line,
++                        'right operand of {} is not an integer'.format(op))
++                    reporter.note(md.line,
++                                  'in definition of macro {}'.format(md.name))
++                return BINARY_OPERATORS[op](left, right)
++
++            reporter.error(md.line,
++                'uninterpretable macro token sequence: {}'.format(
++                    ' '.join(md.body_lowered)))
++            return None
++        finally:
++            del stack[md.name]
++
++    # Start of main body of macro_eval.
++    for md in definitions.values():
++        name = md.name
++        if name not in evaluated and not md.function:
++            evaluated[name] = eval1(md, name)
++    return evaluated
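A rough sketch of driving the new macro helpers end to end, assuming glibcpp is importable; the reporter is a stand-in that happens to satisfy both the tokenizer's error(token, message) interface and macro_eval's error/note(line, message) interface:

    import glibcpp

    class Reporter:
        # Accepts both tokenizer errors and macro_eval diagnostics.
        def error(self, pos, message):
            print('error:', pos, message)
        def note(self, pos, message):
            print('note:', pos, message)

    source = '#define PAGE_SHIFT 12\n#define PAGE_SIZE (1 << PAGE_SHIFT)\n'
    tokens = list(glibcpp.tokenize_c(source, Reporter()))
    values = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), Reporter())
    print(values)   # expected: {'PAGE_SHIFT': 12, 'PAGE_SIZE': 4096}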
+diff --git a/support/Makefile b/support/Makefile
+index 09b41b0d57e9239a..7749ac24f1ac3622 100644
+--- a/support/Makefile
++++ b/support/Makefile
+@@ -223,11 +223,11 @@ $(objpfx)true-container : $(libsupport)
+ tests = \
+   README-testing \
+   tst-support-namespace \
++  tst-support-process_state \
+   tst-support_blob_repeat \
+   tst-support_capture_subprocess \
+   tst-support_descriptors \
+   tst-support_format_dns_packet \
+-  tst-support-process_state \
+   tst-support_quote_blob \
+   tst-support_quote_string \
+   tst-support_record_failure \
+@@ -248,6 +248,12 @@ $(objpfx)tst-support_record_failure-2.out: tst-support_record_failure-2.sh \
+ 	$(evaluate-test)
+ endif
+ 
++tests-special += $(objpfx)tst-glibcpp.out
++
++$(objpfx)tst-glibcpp.out: tst-glibcpp.py $(..)scripts/glibcpp.py
++	PYTHONPATH=$(..)scripts $(PYTHON) tst-glibcpp.py > $@ 2>&1; \
++	$(evaluate-test)
++
+ $(objpfx)tst-support_format_dns_packet: $(common-objpfx)resolv/libresolv.so
+ 
+ tst-support_capture_subprocess-ARGS = -- $(host-test-program-cmd)
+diff --git a/support/tst-glibcpp.py b/support/tst-glibcpp.py
+new file mode 100644
+index 0000000000000000..a2db1916ccfce3c3
+--- /dev/null
++++ b/support/tst-glibcpp.py
+@@ -0,0 +1,217 @@
++#! /usr/bin/python3
++# Tests for scripts/glibcpp.py
++# Copyright (C) 2022 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <https://www.gnu.org/licenses/>.
++
++import inspect
++import sys
++
++import glibcpp
++
++# Error counter.
++errors = 0
++
++class TokenizerErrors:
++    """Used as the error reporter during tokenization."""
++
++    def __init__(self):
++        self.errors = []
++
++    def error(self, token, message):
++        self.errors.append((token, message))
++
++def check_macro_definitions(source, expected):
++    reporter = TokenizerErrors()
++    tokens = glibcpp.tokenize_c(source, reporter)
++
++    actual = []
++    for md in glibcpp.macro_definitions(tokens):
++        if md.function:
++            md_name = '{}({})'.format(md.name, ','.join(md.args_lowered))
++        else:
++            md_name = md.name
++        actual.append((md_name, md.body_lowered))
++
++    if actual != expected or reporter.errors:
++        global errors
++        errors += 1
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        print('{}:{}: error: macro definition mismatch, actual definitions:'
++              .format(frame[1], frame[2]))
++        for md in actual:
++            print('note: {} {!r}'.format(md[0], md[1]))
++
++        if reporter.errors:
++            for err in reporter.errors:
++                print('note: tokenizer error: {}: {}'.format(
++                    err[0].line, err[1]))
++
++def check_macro_eval(source, expected, expected_errors=''):
++    reporter = TokenizerErrors()
++    tokens = list(glibcpp.tokenize_c(source, reporter))
++
++    if reporter.errors:
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        for err in reporter.errors:
++            print('{}:{}: tokenizer error: {}: {}'.format(
++                frame[1], frame[2], err[0].line, err[1]))
++        return
++
++    class EvalReporter:
++        """Used as the error reporter during evaluation."""
++
++        def __init__(self):
++            self.lines = []
++
++        def error(self, line, message):
++            self.lines.append('{}: error: {}\n'.format(line, message))
++
++        def note(self, line, message):
++            self.lines.append('{}: note: {}\n'.format(line, message))
++
++    reporter = EvalReporter()
++    actual = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
++    actual_errors = ''.join(reporter.lines)
++    if actual != expected or actual_errors != expected_errors:
++        global errors
++        errors += 1
++        # Obtain python source line information.
++        frame = inspect.stack(2)[1]
++        print('{}:{}: error: macro evaluation mismatch, actual results:'
++              .format(frame[1], frame[2]))
++        for k, v in actual.items():
++            print('  {}: {!r}'.format(k, v))
++        for msg in reporter.lines:
++            sys.stdout.write('  | ' + msg)
++
++# Individual test cases follow.
++
++check_macro_definitions('', [])
++check_macro_definitions('int main()\n{\n{\n', [])
++check_macro_definitions("""
++#define A 1
++#define B 2 /* ignored */
++#define C 3 // also ignored
++#define D \
++ 4
++#define STRING "string"
++#define FUNCLIKE(a, b) (a + b)
++#define FUNCLIKE2(a, b) (a + \
++ b)
++""", [('A', ['1']),
++      ('B', ['2']),
++      ('C', ['3']),
++      ('D', ['4']),
++      ('STRING', ['"string"']),
++      ('FUNCLIKE(a,b)', list('(a+b)')),
++      ('FUNCLIKE2(a,b)', list('(a+b)')),
++      ])
++check_macro_definitions('#define MACRO', [('MACRO', [])])
++check_macro_definitions('#define MACRO\n', [('MACRO', [])])
++check_macro_definitions('#define MACRO()', [('MACRO()', [])])
++check_macro_definitions('#define MACRO()\n', [('MACRO()', [])])
++
++check_macro_eval('#define A 1', {'A': 1})
++check_macro_eval('#define A (1)', {'A': 1})
++check_macro_eval('#define A (1 + 1)', {'A': 2})
++check_macro_eval('#define A (1U << 31)', {'A': 1 << 31})
++check_macro_eval('''\
++#define A (B + 1)
++#define B 10
++#define F(x) ignored
++#define C "not ignored"
++''', {
++    'A': 11,
++    'B': 10,
++    'C': '"not ignored"',
++})
++
++# Checking for evaluation errors.
++check_macro_eval('''\
++#define A 1
++#define A 2
++''', {
++    'A': 1,
++}, '''\
++2: error: macro A redefined
++1: note: location of previous definition
++''')
++
++check_macro_eval('''\
++#define A A
++#define B 1
++''', {
++    'A': None,
++    'B': 1,
++}, '''\
++1: error: macro definition A refers to itself
++''')
++
++check_macro_eval('''\
++#define A B
++#define B A
++''', {
++    'A': None,
++    'B': None,
++}, '''\
++1: error: macro definition A refers to itself
++2: note: evaluated from B
++''')
++
++check_macro_eval('''\
++#define A B
++#define B C
++#define C A
++''', {
++    'A': None,
++    'B': None,
++    'C': None,
++}, '''\
++1: error: macro definition A refers to itself
++3: note: evaluated from C
++2: note: evaluated from B
++''')
++
++check_macro_eval('''\
++#define A 1 +
++''', {
++    'A': None,
++}, '''\
++1: error: uninterpretable macro token sequence: 1 +
++''')
++
++check_macro_eval('''\
++#define A 3*5
++''', {
++    'A': None,
++}, '''\
++1: error: uninterpretable macro token sequence: 3 * 5
++''')
++
++check_macro_eval('''\
++#define A 3 + 5
++''', {
++    'A': 8,
++}, '''\
++1: error: missing parentheses around + expression
++1: note: in definition of macro A
++''')
++
++if errors:
++    sys.exit(1)
diff --git a/SOURCES/glibc-rh2109510-2.patch b/SOURCES/glibc-rh2109510-2.patch
new file mode 100644
index 0000000..3aba395
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-2.patch
@@ -0,0 +1,208 @@
+Partial backport of:
+
+commit 7e1d42400c1b8f03316fe14176133c8853cd3bbe
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Fri Nov 30 15:20:41 2018 +0000
+
+    Replace gen-as-const.awk by gen-as-const.py.
+    
+    This patch replaces gen-as-const.awk, and some fragments of the
+    Makefile code that used it, by a Python script.  The point is not so
+    much that awk is problematic for this particular script, as that I'd
+    like to build up a general Python infrastructure for extracting
+    information from C headers, for use in writing tests of such headers.
+    Thus, although this patch does not set up such infrastructure, the
+    compute_c_consts function in gen-as-const.py might be moved to a
+    separate Python module in a subsequent patch as a starting point for
+    such infrastructure.
+    
+    The general idea of the code is the same as in the awk version, but no
+    attempt is made to make the output files textually identical.  When
+    generating a header, a dict of constant names and values is generated
+    internally then defines are printed in sorted order (rather than the
+    order in the .sym file, which would have been used before).  When
+    generating a test that the values computed match those from a normal
+    header inclusion, the test code is made into a compilation test using
+    _Static_assert, where previously the comparisons were done only when
+    the test was executed.  One fragment of test generation (converting
+    the previously generated header to use asconst_* prefixes on its macro
+    names) is still in awk code in the makefiles; only the .sym processing
+    and subsequent execution of the compiler to extract constants have
+    moved to the Python script.
+    
+    Tested for x86_64, and with build-many-glibcs.py.
+    
+            * scripts/gen-as-const.py: New file.
+            * scripts/gen-as-const.awk: Remove.
+            * Makerules ($(common-objpfx)%.h $(common-objpfx)%.h.d): Use
+            gen-as-const.py.
+            ($(objpfx)test-as-const-%.c): Likewise.
+
+In the downstream version, scripts/gen-as-const.awk is not removed and
+still used in Makerules.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+new file mode 100644
+index 0000000000000000..b7a5744bb192dd67
+--- /dev/null
++++ b/scripts/gen-as-const.py
+@@ -0,0 +1,159 @@
++#!/usr/bin/python3
++# Produce headers of assembly constants from C expressions.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++# The input to this script looks like:
++#       #cpp-directive ...
++#       NAME1
++#       NAME2 expression ...
++# A line giving just a name implies an expression consisting of just that name.
++
++import argparse
++import os.path
++import re
++import subprocess
++import tempfile
++
++
++def compute_c_consts(sym_data, cc):
++    """Compute the values of some C constants.
++
++    The first argument is a list whose elements are either strings
++    (preprocessor directives) or pairs of strings (a name and a C
++    expression for the corresponding value).  Preprocessor directives
++    in the middle of the list may be used to select which constants
++    end up being evaluated using which expressions.
++
++    """
++    out_lines = []
++    started = False
++    for arg in sym_data:
++        if isinstance(arg, str):
++            out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        if not started:
++            out_lines.append('void\ndummy (void)\n{')
++            started = True
++        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++                         ': : \"i\" ((long int) (%s)));'
++                         % (name, value))
++    if started:
++        out_lines.append('}')
++    out_lines.append('')
++    out_text = '\n'.join(out_lines)
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        s_file_name = os.path.join(temp_dir, 'test.s')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(out_text)
++        # Compilation has to be from stdin to avoid the temporary file
++        # name being written into the generated dependencies.
++        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        consts = {}
++        with open(s_file_name, 'r') as s_file:
++            for line in s_file:
++                match = re.search('@@@name@@@([^@]*)'
++                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
++                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
++                if match:
++                    if (match.group(1) in consts
++                        and match.group(2) != consts[match.group(1)]):
++                        raise ValueError('duplicate constant %s'
++                                         % match.group(1))
++                    consts[match.group(1)] = match.group(2)
++        return consts
++
++
++def gen_test(sym_data):
++    """Generate a test for the values of some C constants.
++
++    The first argument is as for compute_c_consts.
++
++    """
++    out_lines = []
++    started = False
++    for arg in sym_data:
++        if isinstance(arg, str):
++            out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        if not started:
++            out_lines.append('#include <stdint.h>\n'
++                             '#include <stdio.h>\n'
++                             '#include <bits/wordsize.h>\n'
++                             '#if __WORDSIZE == 64\n'
++                             'typedef uint64_t c_t;\n'
++                             '# define U(n) UINT64_C (n)\n'
++                             '#else\n'
++                             'typedef uint32_t c_t;\n'
++                             '# define U(n) UINT32_C (n)\n'
++                             '#endif\n'
++                             'static int\n'
++                             'do_test (void)\n'
++                             '{\n'
++                             # Compilation test only, using static assertions.
++                             '  return 0;\n'
++                             '}\n'
++                             '#include <support/test-driver.c>')
++            started = True
++        out_lines.append('_Static_assert (U (asconst_%s) == (c_t) (%s), '
++                         '"value of %s");'
++                         % (name, value, name))
++    return '\n'.join(out_lines)
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description='Produce headers of assembly constants.')
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    parser.add_argument('--test', action='store_true',
++                        help='Generate test case instead of header')
++    parser.add_argument('sym_file',
++                        help='.sym file to process')
++    args = parser.parse_args()
++    sym_data = []
++    with open(args.sym_file, 'r') as sym_file:
++        for line in sym_file:
++            line = line.strip()
++            if line == '':
++                continue
++            # Pass preprocessor directives through.
++            if line.startswith('#'):
++                sym_data.append(line)
++                continue
++            words = line.split(maxsplit=1)
++            # Separator.
++            if words[0] == '--':
++                continue
++            name = words[0]
++            value = words[1] if len(words) > 1 else words[0]
++            sym_data.append((name, value))
++    if args.test:
++        print(gen_test(sym_data))
++    else:
++        consts = compute_c_consts(sym_data, args.cc)
++        print('\n'.join('#define %s %s' % c for c in sorted(consts.items())))
++
++if __name__ == '__main__':
++    main()
diff --git a/SOURCES/glibc-rh2109510-20.patch b/SOURCES/glibc-rh2109510-20.patch
new file mode 100644
index 0000000..1007e9d
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-20.patch
@@ -0,0 +1,36 @@
+commit 29eb7961197bee68470730aecfdda4d0e206812e
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Mon Sep 5 12:11:19 2022 +0200
+
+    elf.h: Remove duplicate definition of VER_FLG_WEAK
+    
+    This did not cause a warning before because the token sequence for
+    the two definitions was identical.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/elf/elf.h b/elf/elf.h
+index d6506ea1c7160dea..ec09040be639a52a 100644
+--- a/elf/elf.h
++++ b/elf/elf.h
+@@ -1027,7 +1027,8 @@ typedef struct
+ 
+ /* Legal values for vd_flags (version information flags).  */
+ #define VER_FLG_BASE	0x1		/* Version definition of file itself */
+-#define VER_FLG_WEAK	0x2		/* Weak version identifier */
++#define VER_FLG_WEAK	0x2		/* Weak version identifier.  Also
++					   used by vna_flags below.  */
+ 
+ /* Versym symbol index values.  */
+ #define	VER_NDX_LOCAL		0	/* Symbol is local.  */
+@@ -1105,10 +1106,6 @@ typedef struct
+ } Elf64_Vernaux;
+ 
+ 
+-/* Legal values for vna_flags.  */
+-#define VER_FLG_WEAK	0x2		/* Weak version identifier */
+-
+-
+ /* Auxiliary vector.  */
+ 
+ /* This vector is normally only used by the program interpreter.  The
diff --git a/SOURCES/glibc-rh2109510-21.patch b/SOURCES/glibc-rh2109510-21.patch
new file mode 100644
index 0000000..5e58123
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-21.patch
@@ -0,0 +1,1295 @@
+commit 340097d0b50eff9d3058e06c6989ae398c653d4a
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Sep 22 12:10:41 2022 +0200
+
+    elf: Extract glibcelf constants from <elf.h>
+    
+    The need to maintain elf/elf.h and scripts/glibcelf.py in parallel
+    results in a backporting hazard: they need to be kept in sync to
+    avoid elf/tst-glibcelf consistency check failures.  glibcelf (unlike
+    tst-glibcelf) does not use the C implementation to extract constants.
+    This applies the additional glibcpp syntax checks to <elf.h>.
+    
+    This  changereplaces the types derived from Python enum types with
+    custom types _TypedConstant, _IntConstant, and _FlagConstant.  These
+    types have fewer safeguards, but this also allows incremental
+    construction and greater flexibility for grouping constants among
+    the types.  Architectures-specific named constants are now added
+    as members into their superclasses (but value-based lookup is
+    still restricted to generic constants only).
+    
+    Consequently, check_duplicates in elf/tst-glibcelf has been adjusted
+    to accept differently-named constants of the same value if their
+    subtypes are distinct.  The ordering check for named constants
+    has been dropped because they are no longer strictly ordered.
+    
+    Further test adjustments: Some of the type names are different.
+    The new types do not support iteration (because it is unclear
+    whether iteration should cover all named values (including
+    architecture-specific constants), or only the generic named values),
+    so elf/tst-glibcelf now uses by_name explicitly (to get all constants).
+    PF_HP_SBP and PF_PARISC_SBP are now of distinct types (PfHP and
+    PfPARISC), so they are now both present on the Python side.  EM_NUM
+    and PT_NUM are filtered (which was an oversight in the old
+    conversion).
+    
+    The new version of glibcelf should also be compatible with earlier
+    Python versions because it no longer depends on the enum module and its
+    advanced features.
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
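A rough sketch of the resulting constant API, mirroring the checks in check_basic() below; it assumes glibcelf is importable from within a glibc source tree, since the module parses ../elf/elf.h at import time:

    import glibcelf

    print(glibcelf.Pt.PT_NULL)                 # named constants print as 'PT_NULL'
    print(glibcelf.Pt(17609))                  # unknown values print as '17609'
    pt = glibcelf.Pt('PT_AARCH64_MEMTAG_MTE')  # lookup by name
    print(pt.short_name)                       # 'AARCH64_MEMTAG_MTE'
    print(len(glibcelf.Pt.by_name))            # all named constants, including
                                               # architecture-specific ones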
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index e5026e2289df206b..a5bff45eae55edea 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -18,7 +18,6 @@
+ # <https://www.gnu.org/licenses/>.
+ 
+ import argparse
+-import enum
+ import sys
+ 
+ import glibcelf
+@@ -45,11 +44,57 @@ def find_constant_prefix(name):
+ 
+ def find_enum_types():
+     """A generator for OpenIntEnum and IntFlag classes in glibcelf."""
++    classes = set((glibcelf._TypedConstant, glibcelf._IntConstant,
++                   glibcelf._FlagConstant))
+     for obj in vars(glibcelf).values():
+-        if isinstance(obj, type) and obj.__bases__[0] in (
+-                glibcelf._OpenIntEnum, enum.Enum, enum.IntFlag):
++        if isinstance(obj, type) and obj not in classes \
++           and obj.__bases__[0] in classes:
+             yield obj
+ 
++def check_basic():
++    """Check basic functionality of the constant classes."""
++
++    if glibcelf.Pt.PT_NULL is not glibcelf.Pt(0):
++        error('Pt(0) not interned')
++    if glibcelf.Pt(17609) is glibcelf.Pt(17609):
++        error('Pt(17609) unexpectedly interned')
++    if glibcelf.Pt(17609) == glibcelf.Pt(17609):
++        pass
++    else:
++        error('Pt(17609) equality')
++    if glibcelf.Pt(17610) == glibcelf.Pt(17609):
++        error('Pt(17610) equality')
++
++    if str(glibcelf.Pt.PT_NULL) != 'PT_NULL':
++        error('str(PT_NULL)')
++    if str(glibcelf.Pt(17609)) != '17609':
++        error('str(Pt(17609))')
++
++    if repr(glibcelf.Pt.PT_NULL) != 'PT_NULL':
++        error('repr(PT_NULL)')
++    if repr(glibcelf.Pt(17609)) != 'Pt(17609)':
++        error('repr(Pt(17609))')
++
++    if glibcelf.Pt('PT_AARCH64_MEMTAG_MTE') \
++       is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('PT_AARCH64_MEMTAG_MTE identity')
++    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('Pt(0x70000002) identity')
++    if glibcelf.PtAARCH64(0x70000002) is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++        error('PtAARCH64(0x70000002) identity')
++    if glibcelf.Pt.PT_AARCH64_MEMTAG_MTE.short_name != 'AARCH64_MEMTAG_MTE':
++        error('PT_AARCH64_MEMTAG_MTE short name')
++
++    # Special cases for int-like Shn.
++    if glibcelf.Shn(32) == glibcelf.Shn.SHN_XINDEX:
++        error('Shn(32)')
++    if glibcelf.Shn(32) + 0 != 32:
++        error('Shn(32) + 0')
++    if 32 in glibcelf.Shn:
++        error('32 in Shn')
++    if 0 not in glibcelf.Shn:
++        error('0 not in Shn')
++
+ def check_duplicates():
+     """Verifies that enum types do not have duplicate values.
+ 
+@@ -59,17 +104,16 @@ def check_duplicates():
+     global_seen = {}
+     for typ in find_enum_types():
+         seen = {}
+-        last = None
+-        for (name, e) in typ.__members__.items():
++        for (name, e) in typ.by_name.items():
+             if e.value in seen:
+-                error('{} has {}={} and {}={}'.format(
+-                    typ, seen[e.value], e.value, name, e.value))
+-                last = e
++                other = seen[e.value]
++                # Value conflicts only count if they are between
++                # the same base type.
++                if e.__class__ is typ and other.__class__ is typ:
++                    error('{} has {}={} and {}={}'.format(
++                        typ, other, e.value, name, e.value))
+             else:
+                 seen[e.value] = name
+-                if last is not None and last.value > e.value:
+-                    error('{} has {}={} after {}={}'.format(
+-                        typ, name, e.value, last.name, last.value))
+                 if name in global_seen:
+                     error('{} used in {} and {}'.format(
+                         name, global_seen[name], typ))
+@@ -81,7 +125,7 @@ def check_constant_prefixes():
+     seen = set()
+     for typ in find_enum_types():
+         typ_prefix = None
+-        for val in typ:
++        for val in typ.by_name.values():
+             prefix = find_constant_prefix(val.name)
+             if prefix is None:
+                 error('constant {!r} for {} has unknown prefix'.format(
+@@ -113,7 +157,6 @@ def find_elf_h_constants(cc):
+ # used in <elf.h>.
+ glibcelf_skipped_aliases = (
+     ('EM_ARC_A5', 'EM_ARC_COMPACT'),
+-    ('PF_PARISC_SBP', 'PF_HP_SBP')
+ )
+ 
+ # Constants that provide little value and are not included in
+@@ -146,6 +189,7 @@ DT_VALRNGLO
+ DT_VERSIONTAGNUM
+ ELFCLASSNUM
+ ELFDATANUM
++EM_NUM
+ ET_HIOS
+ ET_HIPROC
+ ET_LOOS
+@@ -159,6 +203,7 @@ PT_HISUNW
+ PT_LOOS
+ PT_LOPROC
+ PT_LOSUNW
++PT_NUM
+ SHF_MASKOS
+ SHF_MASKPROC
+ SHN_HIOS
+@@ -193,7 +238,7 @@ def check_constant_values(cc):
+     """Checks the values of <elf.h> constants against glibcelf."""
+ 
+     glibcelf_constants = {
+-        e.name: e for typ in find_enum_types() for e in typ}
++        e.name: e for typ in find_enum_types() for e in typ.by_name.values()}
+     elf_h_constants = find_elf_h_constants(cc=cc)
+ 
+     missing_in_glibcelf = (set(elf_h_constants) - set(glibcelf_constants)
+@@ -229,12 +274,13 @@ def check_constant_values(cc):
+     for name in sorted(set(glibcelf_constants) & set(elf_h_constants)):
+         glibcelf_value = glibcelf_constants[name].value
+         elf_h_value = int(elf_h_constants[name])
+-        # On 32-bit architectures <elf.h> as some constants that are
++        # On 32-bit architectures <elf.h> has some constants that are
+         # parsed as signed, while they are unsigned in glibcelf.  So
+         # far, this only affects some flag constants, so special-case
+         # them here.
+         if (glibcelf_value != elf_h_value
+-            and not (isinstance(glibcelf_constants[name], enum.IntFlag)
++            and not (isinstance(glibcelf_constants[name],
++                                glibcelf._FlagConstant)
+                      and glibcelf_value == 1 << 31
+                      and elf_h_value == -(1 << 31))):
+             error('{}: glibcelf has {!r}, <elf.h> has {!r}'.format(
+@@ -266,6 +312,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+ 
++    check_basic()
+     check_duplicates()
+     check_constant_prefixes()
+     check_constant_values(cc=args.cc)
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 5c8f46f590722384..420cb21943b28bba 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -25,711 +25,445 @@ parsing it.
+ """
+ 
+ import collections
+-import enum
++import functools
++import os
+ import struct
+ 
+-if not hasattr(enum, 'IntFlag'):
+-    import sys
+-    sys.stdout.write(
+-        'warning: glibcelf.py needs Python 3.6 for enum support\n')
+-    sys.exit(77)
++import glibcpp
++
++class _MetaNamedValue(type):
++    """Used to set up _NamedValue subclasses."""
+ 
+-class _OpenIntEnum(enum.IntEnum):
+-    """Integer enumeration that supports arbitrary int values."""
+     @classmethod
+-    def _missing_(cls, value):
+-        # See enum.IntFlag._create_pseudo_member_.  This allows
+-        # creating of enum constants with arbitrary integer values.
+-        pseudo_member = int.__new__(cls, value)
+-        pseudo_member._name_ = None
+-        pseudo_member._value_ = value
+-        return pseudo_member
++    def __prepare__(metacls, cls, bases, **kwds):
++        # Indicates an int-based class.  Needed for types like Shn.
++        int_based = False
++        for base in bases:
++            if issubclass(base, int):
++                int_based = int
++                break
++        return dict(by_value={},
++                    by_name={},
++                    prefix=None,
++                    _int_based=int_based)
+ 
+-    def __repr__(self):
+-        name = self._name_
+-        if name is not None:
+-            # The names have prefixes like SHT_, implying their type.
+-            return name
+-        return '{}({})'.format(self.__class__.__name__, self._value_)
++    def __contains__(self, other):
++        return other in self.by_value
++
++class _NamedValue(metaclass=_MetaNamedValue):
++    """Typed, named integer constants.
++
++    Constants have the following instance attributes:
++
++    name: The full name of the constant (e.g., "PT_NULL").
++    short_name: The name of the constant without the prefix ("NULL").
++    value: The integer value of the constant.
++
++    The following class attributes are available:
++
++    by_value: A dict mapping integers to constants.
++    by_name: A dict mapping strings to constants.
++    prefix: A string that is removed from the start of short names, or None.
++
++    """
++
++    def __new__(cls, arg0, arg1=None):
++        """Instance creation.
++
++        For the one-argument form, the argument must be a string, an
++        int, or an instance of this class.  Strings are looked up via
++        by_name.  Values are looked up via by_value; if value lookup
++        fails, a new unnamed instance is returned.  Instances of this
++        class are returned as-is.
++
++        The two-argument form expects the name (a string) and the
++        value (an integer).  A new instance is created in this case.
++        The instance is not registered in the by_value/by_name
++        dictionaries (but the caller can do that).
++
++        """
++
++        typ0 = type(arg0)
++        if arg1 is None:
++            if isinstance(typ0, cls):
++                # Re-use the existing object.
++                return arg0
++            if typ0 is int:
++                by_value = cls.by_value
++                try:
++                    return by_value[arg0]
++                except KeyError:
++                    # Create a new object of the requested value.
++                    if cls._int_based:
++                        result = int.__new__(cls, arg0)
++                    else:
++                        result = object.__new__(cls)
++                    result.value = arg0
++                    result.name = None
++                    return result
++            if typ0 is str:
++                by_name = cls.by_name
++                try:
++                    return by_name[arg0]
++                except KeyError:
++                    raise ValueError('unknown {} constant: {!r}'.format(
++                        cls.__name__, arg0))
++        else:
++            # Types for the two-argument form are rigid.
++            if typ0 is not str and typ0 is not None:
++                raise ValueError('type {} of name {!r} should be str'.format(
++                    typ0.__name__, arg0))
++            if type(arg1) is not int:
++                raise ValueError('type {} of value {!r} should be int'.format(
++                    type(arg1).__name__, arg1))
++            # Create a new named constant.
++            if cls._int_based:
++                result = int.__new__(cls, arg1)
++            else:
++                result = object.__new__(cls)
++            result.value = arg1
++            result.name = arg0
++            # Set up the short_name attribute.
++            prefix = cls.prefix
++            if prefix and arg0.startswith(prefix):
++                result.short_name = arg0[len(prefix):]
++            else:
++                result.short_name = arg0
++            return result
+ 
+     def __str__(self):
+-        name = self._name_
+-        if name is not None:
++        name = self.name
++        if name:
++            return name
++        else:
++            return str(self.value)
++
++    def __repr__(self):
++        name = self.name
++        if name:
+             return name
+-        return str(self._value_)
++        else:
++            return '{}({})'.format(self.__class__.__name__, self.value)
++
++    def __setattr__(self, name, value):
++        # Prevent modification of the critical attributes once they
++        # have been set.
++        if name in ('name', 'value', 'short_name') and hasattr(self, name):
++            raise AttributeError('can\'t set attribute {}'.format(name))
++        object.__setattr__(self, name, value)
++
++@functools.total_ordering
++class _TypedConstant(_NamedValue):
++    """Base class for integer-valued optionally named constants.
++
++    This type is not an integer type.
++
++    """
++
++    def __eq__(self, other):
++        return isinstance(other, self.__class__) and self.value == other.value
++
++    def __lt__(self, other):
++        return isinstance(other, self.__class__) and self.value < other.value
++
++    def __hash__(self):
++        return hash(self.value)
++
++class _IntConstant(_NamedValue, int):
++    """Base class for integer-like optionally named constants.
++
++    Instances compare equal to the integer of the same value, and can
++    be used in integer arithmetic.
++
++    """
+ 
+-class ElfClass(_OpenIntEnum):
++    pass
++
++class _FlagConstant(_TypedConstant, int):
++    pass
++
++def _parse_elf_h():
++    """Read ../elf/elf.h and return a dict with the constants in it."""
++
++    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
++                        '..', 'elf', 'elf.h')
++    class TokenizerReporter:
++        """Report tokenizer errors to standard output."""
++
++        def __init__(self):
++            self.errors = 0
++
++        def error(self, token, message):
++            self.errors += 1
++            print('{}:{}:{}: error: {}'.format(
++                path, token.line, token.column, message))
++
++    reporter = TokenizerReporter()
++    with open(path) as inp:
++        tokens = glibcpp.tokenize_c(inp.read(), reporter)
++    if reporter.errors:
++        raise IOError('parse error in elf.h')
++
++    class MacroReporter:
++        """Report macro errors to standard output."""
++
++        def __init__(self):
++            self.errors = 0
++
++        def error(self, line, message):
++            self.errors += 1
++            print('{}:{}: error: {}'.format(path, line, message))
++
++        def note(self, line, message):
++            print('{}:{}: note: {}'.format(path, line, message))
++
++    reporter = MacroReporter()
++    result = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
++    if reporter.errors:
++        raise IOError('parse error in elf.h')
++
++    return result
++_elf_h = _parse_elf_h()
++del _parse_elf_h
++_elf_h_processed = set()
++
++def _register_elf_h(cls, prefix=None, skip=(), ranges=False, parent=None):
++    prefix = prefix or cls.prefix
++    if not prefix:
++        raise ValueError('missing prefix for {}'.format(cls.__name__))
++    by_value = cls.by_value
++    by_name = cls.by_name
++    processed = _elf_h_processed
++
++    skip = set(skip)
++    skip.add(prefix + 'NUM')
++    if ranges:
++        skip.add(prefix + 'LOOS')
++        skip.add(prefix + 'HIOS')
++        skip.add(prefix + 'LOPROC')
++        skip.add(prefix + 'HIPROC')
++        cls.os_range = (_elf_h[prefix + 'LOOS'], _elf_h[prefix + 'HIOS'])
++        cls.proc_range = (_elf_h[prefix + 'LOPROC'], _elf_h[prefix + 'HIPROC'])
++
++    # Inherit the prefix from the parent if not set.
++    if parent and cls.prefix is None and parent.prefix is not None:
++        cls.prefix = parent.prefix
++
++    processed_len_start = len(processed)
++    for name, value in _elf_h.items():
++        if name in skip or name in processed:
++            continue
++        if name.startswith(prefix):
++            processed.add(name)
++            if value in by_value:
++                raise ValueError('duplicate value {}: {}, {}'.format(
++                    value, name, by_value[value]))
++            obj = cls(name, value)
++            by_value[value] = obj
++            by_name[name] = obj
++            setattr(cls, name, obj)
++            if parent:
++                # Make the symbolic name available through the parent as well.
++                parent.by_name[name] = obj
++                setattr(parent, name, obj)
++
++    if len(processed) == processed_len_start:
++        raise ValueError('nothing matched prefix {!r}'.format(prefix))
++
++class ElfClass(_TypedConstant):
+     """ELF word size.  Type of EI_CLASS values."""
+-    ELFCLASSNONE = 0
+-    ELFCLASS32 = 1
+-    ELFCLASS64 = 2
++_register_elf_h(ElfClass, prefix='ELFCLASS')
+ 
+-class ElfData(_OpenIntEnum):
++class ElfData(_TypedConstant):
+     """ELF endianess.  Type of EI_DATA values."""
+-    ELFDATANONE = 0
+-    ELFDATA2LSB = 1
+-    ELFDATA2MSB = 2
++_register_elf_h(ElfData, prefix='ELFDATA')
+ 
+-class Machine(_OpenIntEnum):
++class Machine(_TypedConstant):
+     """ELF machine type.  Type of values in Ehdr.e_machine field."""
+-    EM_NONE = 0
+-    EM_M32 = 1
+-    EM_SPARC = 2
+-    EM_386 = 3
+-    EM_68K = 4
+-    EM_88K = 5
+-    EM_IAMCU = 6
+-    EM_860 = 7
+-    EM_MIPS = 8
+-    EM_S370 = 9
+-    EM_MIPS_RS3_LE = 10
+-    EM_PARISC = 15
+-    EM_VPP500 = 17
+-    EM_SPARC32PLUS = 18
+-    EM_960 = 19
+-    EM_PPC = 20
+-    EM_PPC64 = 21
+-    EM_S390 = 22
+-    EM_SPU = 23
+-    EM_V800 = 36
+-    EM_FR20 = 37
+-    EM_RH32 = 38
+-    EM_RCE = 39
+-    EM_ARM = 40
+-    EM_FAKE_ALPHA = 41
+-    EM_SH = 42
+-    EM_SPARCV9 = 43
+-    EM_TRICORE = 44
+-    EM_ARC = 45
+-    EM_H8_300 = 46
+-    EM_H8_300H = 47
+-    EM_H8S = 48
+-    EM_H8_500 = 49
+-    EM_IA_64 = 50
+-    EM_MIPS_X = 51
+-    EM_COLDFIRE = 52
+-    EM_68HC12 = 53
+-    EM_MMA = 54
+-    EM_PCP = 55
+-    EM_NCPU = 56
+-    EM_NDR1 = 57
+-    EM_STARCORE = 58
+-    EM_ME16 = 59
+-    EM_ST100 = 60
+-    EM_TINYJ = 61
+-    EM_X86_64 = 62
+-    EM_PDSP = 63
+-    EM_PDP10 = 64
+-    EM_PDP11 = 65
+-    EM_FX66 = 66
+-    EM_ST9PLUS = 67
+-    EM_ST7 = 68
+-    EM_68HC16 = 69
+-    EM_68HC11 = 70
+-    EM_68HC08 = 71
+-    EM_68HC05 = 72
+-    EM_SVX = 73
+-    EM_ST19 = 74
+-    EM_VAX = 75
+-    EM_CRIS = 76
+-    EM_JAVELIN = 77
+-    EM_FIREPATH = 78
+-    EM_ZSP = 79
+-    EM_MMIX = 80
+-    EM_HUANY = 81
+-    EM_PRISM = 82
+-    EM_AVR = 83
+-    EM_FR30 = 84
+-    EM_D10V = 85
+-    EM_D30V = 86
+-    EM_V850 = 87
+-    EM_M32R = 88
+-    EM_MN10300 = 89
+-    EM_MN10200 = 90
+-    EM_PJ = 91
+-    EM_OPENRISC = 92
+-    EM_ARC_COMPACT = 93
+-    EM_XTENSA = 94
+-    EM_VIDEOCORE = 95
+-    EM_TMM_GPP = 96
+-    EM_NS32K = 97
+-    EM_TPC = 98
+-    EM_SNP1K = 99
+-    EM_ST200 = 100
+-    EM_IP2K = 101
+-    EM_MAX = 102
+-    EM_CR = 103
+-    EM_F2MC16 = 104
+-    EM_MSP430 = 105
+-    EM_BLACKFIN = 106
+-    EM_SE_C33 = 107
+-    EM_SEP = 108
+-    EM_ARCA = 109
+-    EM_UNICORE = 110
+-    EM_EXCESS = 111
+-    EM_DXP = 112
+-    EM_ALTERA_NIOS2 = 113
+-    EM_CRX = 114
+-    EM_XGATE = 115
+-    EM_C166 = 116
+-    EM_M16C = 117
+-    EM_DSPIC30F = 118
+-    EM_CE = 119
+-    EM_M32C = 120
+-    EM_TSK3000 = 131
+-    EM_RS08 = 132
+-    EM_SHARC = 133
+-    EM_ECOG2 = 134
+-    EM_SCORE7 = 135
+-    EM_DSP24 = 136
+-    EM_VIDEOCORE3 = 137
+-    EM_LATTICEMICO32 = 138
+-    EM_SE_C17 = 139
+-    EM_TI_C6000 = 140
+-    EM_TI_C2000 = 141
+-    EM_TI_C5500 = 142
+-    EM_TI_ARP32 = 143
+-    EM_TI_PRU = 144
+-    EM_MMDSP_PLUS = 160
+-    EM_CYPRESS_M8C = 161
+-    EM_R32C = 162
+-    EM_TRIMEDIA = 163
+-    EM_QDSP6 = 164
+-    EM_8051 = 165
+-    EM_STXP7X = 166
+-    EM_NDS32 = 167
+-    EM_ECOG1X = 168
+-    EM_MAXQ30 = 169
+-    EM_XIMO16 = 170
+-    EM_MANIK = 171
+-    EM_CRAYNV2 = 172
+-    EM_RX = 173
+-    EM_METAG = 174
+-    EM_MCST_ELBRUS = 175
+-    EM_ECOG16 = 176
+-    EM_CR16 = 177
+-    EM_ETPU = 178
+-    EM_SLE9X = 179
+-    EM_L10M = 180
+-    EM_K10M = 181
+-    EM_AARCH64 = 183
+-    EM_AVR32 = 185
+-    EM_STM8 = 186
+-    EM_TILE64 = 187
+-    EM_TILEPRO = 188
+-    EM_MICROBLAZE = 189
+-    EM_CUDA = 190
+-    EM_TILEGX = 191
+-    EM_CLOUDSHIELD = 192
+-    EM_COREA_1ST = 193
+-    EM_COREA_2ND = 194
+-    EM_ARCV2 = 195
+-    EM_OPEN8 = 196
+-    EM_RL78 = 197
+-    EM_VIDEOCORE5 = 198
+-    EM_78KOR = 199
+-    EM_56800EX = 200
+-    EM_BA1 = 201
+-    EM_BA2 = 202
+-    EM_XCORE = 203
+-    EM_MCHP_PIC = 204
+-    EM_INTELGT = 205
+-    EM_KM32 = 210
+-    EM_KMX32 = 211
+-    EM_EMX16 = 212
+-    EM_EMX8 = 213
+-    EM_KVARC = 214
+-    EM_CDP = 215
+-    EM_COGE = 216
+-    EM_COOL = 217
+-    EM_NORC = 218
+-    EM_CSR_KALIMBA = 219
+-    EM_Z80 = 220
+-    EM_VISIUM = 221
+-    EM_FT32 = 222
+-    EM_MOXIE = 223
+-    EM_AMDGPU = 224
+-    EM_RISCV = 243
+-    EM_BPF = 247
+-    EM_CSKY = 252
+-    EM_LOONGARCH = 258
+-    EM_NUM = 259
+-    EM_ALPHA = 0x9026
+-
+-class Et(_OpenIntEnum):
++    prefix = 'EM_'
++_register_elf_h(Machine, skip=('EM_ARC_A5',))
++
++class Et(_TypedConstant):
+     """ELF file type.  Type of ET_* values and the Ehdr.e_type field."""
+-    ET_NONE = 0
+-    ET_REL = 1
+-    ET_EXEC = 2
+-    ET_DYN = 3
+-    ET_CORE = 4
++    prefix = 'ET_'
++_register_elf_h(Et, ranges=True)
+ 
+-class Shn(_OpenIntEnum):
++class Shn(_IntConstant):
+     """ELF reserved section indices."""
+-    SHN_UNDEF = 0
+-    SHN_BEFORE = 0xff00
+-    SHN_AFTER = 0xff01
+-    SHN_ABS = 0xfff1
+-    SHN_COMMON = 0xfff2
+-    SHN_XINDEX = 0xffff
+-
+-class ShnMIPS(enum.Enum):
++    prefix = 'SHN_'
++class ShnMIPS(Shn):
+     """Supplemental SHN_* constants for EM_MIPS."""
+-    SHN_MIPS_ACOMMON = 0xff00
+-    SHN_MIPS_TEXT = 0xff01
+-    SHN_MIPS_DATA = 0xff02
+-    SHN_MIPS_SCOMMON = 0xff03
+-    SHN_MIPS_SUNDEFINED = 0xff04
+-
+-class ShnPARISC(enum.Enum):
++class ShnPARISC(Shn):
+     """Supplemental SHN_* constants for EM_PARISC."""
+-    SHN_PARISC_ANSI_COMMON = 0xff00
+-    SHN_PARISC_HUGE_COMMON = 0xff01
++_register_elf_h(ShnMIPS, prefix='SHN_MIPS_', parent=Shn)
++_register_elf_h(ShnPARISC, prefix='SHN_PARISC_', parent=Shn)
++_register_elf_h(Shn, skip='SHN_LORESERVE SHN_HIRESERVE'.split(), ranges=True)
+ 
+-class Sht(_OpenIntEnum):
++class Sht(_TypedConstant):
+     """ELF section types.  Type of SHT_* values."""
+-    SHT_NULL = 0
+-    SHT_PROGBITS = 1
+-    SHT_SYMTAB = 2
+-    SHT_STRTAB = 3
+-    SHT_RELA = 4
+-    SHT_HASH = 5
+-    SHT_DYNAMIC = 6
+-    SHT_NOTE = 7
+-    SHT_NOBITS = 8
+-    SHT_REL = 9
+-    SHT_SHLIB = 10
+-    SHT_DYNSYM = 11
+-    SHT_INIT_ARRAY = 14
+-    SHT_FINI_ARRAY = 15
+-    SHT_PREINIT_ARRAY = 16
+-    SHT_GROUP = 17
+-    SHT_SYMTAB_SHNDX = 18
+-    SHT_RELR = 19
+-    SHT_GNU_ATTRIBUTES = 0x6ffffff5
+-    SHT_GNU_HASH = 0x6ffffff6
+-    SHT_GNU_LIBLIST = 0x6ffffff7
+-    SHT_CHECKSUM = 0x6ffffff8
+-    SHT_SUNW_move = 0x6ffffffa
+-    SHT_SUNW_COMDAT = 0x6ffffffb
+-    SHT_SUNW_syminfo = 0x6ffffffc
+-    SHT_GNU_verdef = 0x6ffffffd
+-    SHT_GNU_verneed = 0x6ffffffe
+-    SHT_GNU_versym = 0x6fffffff
+-
+-class ShtALPHA(enum.Enum):
++    prefix = 'SHT_'
++class ShtALPHA(Sht):
+     """Supplemental SHT_* constants for EM_ALPHA."""
+-    SHT_ALPHA_DEBUG = 0x70000001
+-    SHT_ALPHA_REGINFO = 0x70000002
+-
+-class ShtARM(enum.Enum):
++class ShtARM(Sht):
+     """Supplemental SHT_* constants for EM_ARM."""
+-    SHT_ARM_EXIDX = 0x70000001
+-    SHT_ARM_PREEMPTMAP = 0x70000002
+-    SHT_ARM_ATTRIBUTES = 0x70000003
+-
+-class ShtCSKY(enum.Enum):
++class ShtCSKY(Sht):
+     """Supplemental SHT_* constants for EM_CSKY."""
+-    SHT_CSKY_ATTRIBUTES = 0x70000001
+-
+-class ShtIA_64(enum.Enum):
++class ShtIA_64(Sht):
+     """Supplemental SHT_* constants for EM_IA_64."""
+-    SHT_IA_64_EXT = 0x70000000
+-    SHT_IA_64_UNWIND = 0x70000001
+-
+-class ShtMIPS(enum.Enum):
++class ShtMIPS(Sht):
+     """Supplemental SHT_* constants for EM_MIPS."""
+-    SHT_MIPS_LIBLIST = 0x70000000
+-    SHT_MIPS_MSYM = 0x70000001
+-    SHT_MIPS_CONFLICT = 0x70000002
+-    SHT_MIPS_GPTAB = 0x70000003
+-    SHT_MIPS_UCODE = 0x70000004
+-    SHT_MIPS_DEBUG = 0x70000005
+-    SHT_MIPS_REGINFO = 0x70000006
+-    SHT_MIPS_PACKAGE = 0x70000007
+-    SHT_MIPS_PACKSYM = 0x70000008
+-    SHT_MIPS_RELD = 0x70000009
+-    SHT_MIPS_IFACE = 0x7000000b
+-    SHT_MIPS_CONTENT = 0x7000000c
+-    SHT_MIPS_OPTIONS = 0x7000000d
+-    SHT_MIPS_SHDR = 0x70000010
+-    SHT_MIPS_FDESC = 0x70000011
+-    SHT_MIPS_EXTSYM = 0x70000012
+-    SHT_MIPS_DENSE = 0x70000013
+-    SHT_MIPS_PDESC = 0x70000014
+-    SHT_MIPS_LOCSYM = 0x70000015
+-    SHT_MIPS_AUXSYM = 0x70000016
+-    SHT_MIPS_OPTSYM = 0x70000017
+-    SHT_MIPS_LOCSTR = 0x70000018
+-    SHT_MIPS_LINE = 0x70000019
+-    SHT_MIPS_RFDESC = 0x7000001a
+-    SHT_MIPS_DELTASYM = 0x7000001b
+-    SHT_MIPS_DELTAINST = 0x7000001c
+-    SHT_MIPS_DELTACLASS = 0x7000001d
+-    SHT_MIPS_DWARF = 0x7000001e
+-    SHT_MIPS_DELTADECL = 0x7000001f
+-    SHT_MIPS_SYMBOL_LIB = 0x70000020
+-    SHT_MIPS_EVENTS = 0x70000021
+-    SHT_MIPS_TRANSLATE = 0x70000022
+-    SHT_MIPS_PIXIE = 0x70000023
+-    SHT_MIPS_XLATE = 0x70000024
+-    SHT_MIPS_XLATE_DEBUG = 0x70000025
+-    SHT_MIPS_WHIRL = 0x70000026
+-    SHT_MIPS_EH_REGION = 0x70000027
+-    SHT_MIPS_XLATE_OLD = 0x70000028
+-    SHT_MIPS_PDR_EXCEPTION = 0x70000029
+-    SHT_MIPS_XHASH = 0x7000002b
+-
+-class ShtPARISC(enum.Enum):
++class ShtPARISC(Sht):
+     """Supplemental SHT_* constants for EM_PARISC."""
+-    SHT_PARISC_EXT = 0x70000000
+-    SHT_PARISC_UNWIND = 0x70000001
+-    SHT_PARISC_DOC = 0x70000002
+-
+-class ShtRISCV(enum.Enum):
++class ShtRISCV(Sht):
+     """Supplemental SHT_* constants for EM_RISCV."""
+-    SHT_RISCV_ATTRIBUTES = 0x70000003
+-
+-class Pf(enum.IntFlag):
++_register_elf_h(ShtALPHA, prefix='SHT_ALPHA_', parent=Sht)
++_register_elf_h(ShtARM, prefix='SHT_ARM_', parent=Sht)
++_register_elf_h(ShtCSKY, prefix='SHT_CSKY_', parent=Sht)
++_register_elf_h(ShtIA_64, prefix='SHT_IA_64_', parent=Sht)
++_register_elf_h(ShtMIPS, prefix='SHT_MIPS_', parent=Sht)
++_register_elf_h(ShtPARISC, prefix='SHT_PARISC_', parent=Sht)
++_register_elf_h(ShtRISCV, prefix='SHT_RISCV_', parent=Sht)
++_register_elf_h(Sht, ranges=True,
++                skip='SHT_LOSUNW SHT_HISUNW SHT_LOUSER SHT_HIUSER'.split())
++
++class Pf(_FlagConstant):
+     """Program header flags.  Type of Phdr.p_flags values."""
+-    PF_X = 1
+-    PF_W = 2
+-    PF_R = 4
+-
+-class PfARM(enum.IntFlag):
++    prefix = 'PF_'
++class PfARM(Pf):
+     """Supplemental PF_* flags for EM_ARM."""
+-    PF_ARM_SB = 0x10000000
+-    PF_ARM_PI = 0x20000000
+-    PF_ARM_ABS = 0x40000000
+-
+-class PfPARISC(enum.IntFlag):
+-    """Supplemental PF_* flags for EM_PARISC."""
+-    PF_HP_PAGE_SIZE = 0x00100000
+-    PF_HP_FAR_SHARED = 0x00200000
+-    PF_HP_NEAR_SHARED = 0x00400000
+-    PF_HP_CODE = 0x01000000
+-    PF_HP_MODIFY = 0x02000000
+-    PF_HP_LAZYSWAP = 0x04000000
+-    PF_HP_SBP = 0x08000000
+-
+-class PfIA_64(enum.IntFlag):
++class PfHP(Pf):
++    """Supplemental PF_* flags for HP-UX."""
++class PfIA_64(Pf):
+     """Supplemental PF_* flags for EM_IA_64."""
+-    PF_IA_64_NORECOV = 0x80000000
+-
+-class PfMIPS(enum.IntFlag):
++class PfMIPS(Pf):
+     """Supplemental PF_* flags for EM_MIPS."""
+-    PF_MIPS_LOCAL = 0x10000000
+-
+-class Shf(enum.IntFlag):
++class PfPARISC(Pf):
++    """Supplemental PF_* flags for EM_PARISC."""
++_register_elf_h(PfARM, prefix='PF_ARM_', parent=Pf)
++_register_elf_h(PfHP, prefix='PF_HP_', parent=Pf)
++_register_elf_h(PfIA_64, prefix='PF_IA_64_', parent=Pf)
++_register_elf_h(PfMIPS, prefix='PF_MIPS_', parent=Pf)
++_register_elf_h(PfPARISC, prefix='PF_PARISC_', parent=Pf)
++_register_elf_h(Pf, skip='PF_MASKOS PF_MASKPROC'.split())
++
++class Shf(_FlagConstant):
+     """Section flags.  Type of Shdr.sh_type values."""
+-    SHF_WRITE = 1 << 0
+-    SHF_ALLOC = 1 << 1
+-    SHF_EXECINSTR = 1 << 2
+-    SHF_MERGE = 1 << 4
+-    SHF_STRINGS = 1 << 5
+-    SHF_INFO_LINK = 1 << 6
+-    SHF_LINK_ORDER = 1 << 7
+-    SHF_OS_NONCONFORMING = 256
+-    SHF_GROUP = 1 << 9
+-    SHF_TLS = 1 << 10
+-    SHF_COMPRESSED = 1 << 11
+-    SHF_GNU_RETAIN = 1 << 21
+-    SHF_ORDERED = 1 << 30
+-    SHF_EXCLUDE = 1 << 31
+-
+-class ShfALPHA(enum.IntFlag):
++    prefix = 'SHF_'
++class ShfALPHA(Shf):
+     """Supplemental SHF_* constants for EM_ALPHA."""
+-    SHF_ALPHA_GPREL = 0x10000000
+-
+-class ShfARM(enum.IntFlag):
++class ShfARM(Shf):
+     """Supplemental SHF_* constants for EM_ARM."""
+-    SHF_ARM_ENTRYSECT = 0x10000000
+-    SHF_ARM_COMDEF = 0x80000000
+-
+-class ShfIA_64(enum.IntFlag):
++class ShfIA_64(Shf):
+     """Supplemental SHF_* constants for EM_IA_64."""
+-    SHF_IA_64_SHORT  = 0x10000000
+-    SHF_IA_64_NORECOV = 0x20000000
+-
+-class ShfMIPS(enum.IntFlag):
++class ShfMIPS(Shf):
+     """Supplemental SHF_* constants for EM_MIPS."""
+-    SHF_MIPS_GPREL = 0x10000000
+-    SHF_MIPS_MERGE = 0x20000000
+-    SHF_MIPS_ADDR = 0x40000000
+-    SHF_MIPS_STRINGS = 0x80000000
+-    SHF_MIPS_NOSTRIP = 0x08000000
+-    SHF_MIPS_LOCAL = 0x04000000
+-    SHF_MIPS_NAMES = 0x02000000
+-    SHF_MIPS_NODUPE = 0x01000000
+-
+-class ShfPARISC(enum.IntFlag):
++class ShfPARISC(Shf):
+     """Supplemental SHF_* constants for EM_PARISC."""
+-    SHF_PARISC_SHORT = 0x20000000
+-    SHF_PARISC_HUGE = 0x40000000
+-    SHF_PARISC_SBP = 0x80000000
+-
+-class Stb(_OpenIntEnum):
++_register_elf_h(ShfALPHA, prefix='SHF_ALPHA_', parent=Shf)
++_register_elf_h(ShfARM, prefix='SHF_ARM_', parent=Shf)
++_register_elf_h(ShfIA_64, prefix='SHF_IA_64_', parent=Shf)
++_register_elf_h(ShfMIPS, prefix='SHF_MIPS_', parent=Shf)
++_register_elf_h(ShfPARISC, prefix='SHF_PARISC_', parent=Shf)
++_register_elf_h(Shf, skip='SHF_MASKOS SHF_MASKPROC'.split())
++
++class Stb(_TypedConstant):
+     """ELF symbol binding type."""
+-    STB_LOCAL = 0
+-    STB_GLOBAL = 1
+-    STB_WEAK = 2
+-    STB_GNU_UNIQUE = 10
+-    STB_MIPS_SPLIT_COMMON = 13
++    prefix = 'STB_'
++_register_elf_h(Stb, ranges=True)
+ 
+-class Stt(_OpenIntEnum):
++class Stt(_TypedConstant):
+     """ELF symbol type."""
+-    STT_NOTYPE = 0
+-    STT_OBJECT = 1
+-    STT_FUNC = 2
+-    STT_SECTION = 3
+-    STT_FILE = 4
+-    STT_COMMON = 5
+-    STT_TLS = 6
+-    STT_GNU_IFUNC = 10
+-
+-class SttARM(enum.Enum):
++    prefix = 'STT_'
++class SttARM(Sht):
+     """Supplemental STT_* constants for EM_ARM."""
+-    STT_ARM_TFUNC = 13
+-    STT_ARM_16BIT = 15
+-
+-class SttPARISC(enum.Enum):
++class SttPARISC(Sht):
+     """Supplemental STT_* constants for EM_PARISC."""
+-    STT_HP_OPAQUE = 11
+-    STT_HP_STUB = 12
+-    STT_PARISC_MILLICODE = 13
+-
+-class SttSPARC(enum.Enum):
++class SttSPARC(Sht):
+     """Supplemental STT_* constants for EM_SPARC."""
+     STT_SPARC_REGISTER = 13
+-
+-class SttX86_64(enum.Enum):
++class SttX86_64(Sht):
+     """Supplemental STT_* constants for EM_X86_64."""
+-    SHT_X86_64_UNWIND = 0x70000001
++_register_elf_h(SttARM, prefix='STT_ARM_', parent=Stt)
++_register_elf_h(SttPARISC, prefix='STT_PARISC_', parent=Stt)
++_register_elf_h(SttSPARC, prefix='STT_SPARC_', parent=Stt)
++_register_elf_h(Stt, ranges=True)
++
+ 
+-class Pt(_OpenIntEnum):
++class Pt(_TypedConstant):
+     """ELF program header types.  Type of Phdr.p_type."""
+-    PT_NULL = 0
+-    PT_LOAD = 1
+-    PT_DYNAMIC = 2
+-    PT_INTERP = 3
+-    PT_NOTE = 4
+-    PT_SHLIB = 5
+-    PT_PHDR = 6
+-    PT_TLS = 7
+-    PT_NUM = 8
+-    PT_GNU_EH_FRAME = 0x6474e550
+-    PT_GNU_STACK = 0x6474e551
+-    PT_GNU_RELRO = 0x6474e552
+-    PT_GNU_PROPERTY = 0x6474e553
+-    PT_SUNWBSS = 0x6ffffffa
+-    PT_SUNWSTACK = 0x6ffffffb
+-
+-class PtAARCH64(enum.Enum):
++    prefix = 'PT_'
++class PtAARCH64(Pt):
+     """Supplemental PT_* constants for EM_AARCH64."""
+-    PT_AARCH64_MEMTAG_MTE = 0x70000002
+-
+-class PtARM(enum.Enum):
++class PtARM(Pt):
+     """Supplemental PT_* constants for EM_ARM."""
+-    PT_ARM_EXIDX = 0x70000001
+-
+-class PtIA_64(enum.Enum):
++class PtHP(Pt):
++    """Supplemental PT_* constants for HP-U."""
++class PtIA_64(Pt):
+     """Supplemental PT_* constants for EM_IA_64."""
+-    PT_IA_64_HP_OPT_ANOT = 0x60000012
+-    PT_IA_64_HP_HSL_ANOT = 0x60000013
+-    PT_IA_64_HP_STACK = 0x60000014
+-    PT_IA_64_ARCHEXT = 0x70000000
+-    PT_IA_64_UNWIND = 0x70000001
+-
+-class PtMIPS(enum.Enum):
++class PtMIPS(Pt):
+     """Supplemental PT_* constants for EM_MIPS."""
+-    PT_MIPS_REGINFO = 0x70000000
+-    PT_MIPS_RTPROC = 0x70000001
+-    PT_MIPS_OPTIONS = 0x70000002
+-    PT_MIPS_ABIFLAGS = 0x70000003
+-
+-class PtPARISC(enum.Enum):
++class PtPARISC(Pt):
+     """Supplemental PT_* constants for EM_PARISC."""
+-    PT_HP_TLS = 0x60000000
+-    PT_HP_CORE_NONE = 0x60000001
+-    PT_HP_CORE_VERSION = 0x60000002
+-    PT_HP_CORE_KERNEL = 0x60000003
+-    PT_HP_CORE_COMM = 0x60000004
+-    PT_HP_CORE_PROC = 0x60000005
+-    PT_HP_CORE_LOADABLE = 0x60000006
+-    PT_HP_CORE_STACK = 0x60000007
+-    PT_HP_CORE_SHM = 0x60000008
+-    PT_HP_CORE_MMF = 0x60000009
+-    PT_HP_PARALLEL = 0x60000010
+-    PT_HP_FASTBIND = 0x60000011
+-    PT_HP_OPT_ANNOT = 0x60000012
+-    PT_HP_HSL_ANNOT = 0x60000013
+-    PT_HP_STACK = 0x60000014
+-    PT_PARISC_ARCHEXT = 0x70000000
+-    PT_PARISC_UNWIND = 0x70000001
+-
+-class PtRISCV(enum.Enum):
++class PtRISCV(Pt):
+     """Supplemental PT_* constants for EM_RISCV."""
+-    PT_RISCV_ATTRIBUTES = 0x70000003
+-
+-class Dt(_OpenIntEnum):
++_register_elf_h(PtAARCH64, prefix='PT_AARCH64_', parent=Pt)
++_register_elf_h(PtARM, prefix='PT_ARM_', parent=Pt)
++_register_elf_h(PtHP, prefix='PT_HP_', parent=Pt)
++_register_elf_h(PtIA_64, prefix='PT_IA_64_', parent=Pt)
++_register_elf_h(PtMIPS, prefix='PT_MIPS_', parent=Pt)
++_register_elf_h(PtPARISC, prefix='PT_PARISC_', parent=Pt)
++_register_elf_h(PtRISCV, prefix='PT_RISCV_', parent=Pt)
++_register_elf_h(Pt, skip='PT_LOSUNW PT_HISUNW'.split(), ranges=True)
++
++class Dt(_TypedConstant):
+     """ELF dynamic segment tags.  Type of Dyn.d_val."""
+-    DT_NULL = 0
+-    DT_NEEDED = 1
+-    DT_PLTRELSZ = 2
+-    DT_PLTGOT = 3
+-    DT_HASH = 4
+-    DT_STRTAB = 5
+-    DT_SYMTAB = 6
+-    DT_RELA = 7
+-    DT_RELASZ = 8
+-    DT_RELAENT = 9
+-    DT_STRSZ = 10
+-    DT_SYMENT = 11
+-    DT_INIT = 12
+-    DT_FINI = 13
+-    DT_SONAME = 14
+-    DT_RPATH = 15
+-    DT_SYMBOLIC = 16
+-    DT_REL = 17
+-    DT_RELSZ = 18
+-    DT_RELENT = 19
+-    DT_PLTREL = 20
+-    DT_DEBUG = 21
+-    DT_TEXTREL = 22
+-    DT_JMPREL = 23
+-    DT_BIND_NOW = 24
+-    DT_INIT_ARRAY = 25
+-    DT_FINI_ARRAY = 26
+-    DT_INIT_ARRAYSZ = 27
+-    DT_FINI_ARRAYSZ = 28
+-    DT_RUNPATH = 29
+-    DT_FLAGS = 30
+-    DT_PREINIT_ARRAY = 32
+-    DT_PREINIT_ARRAYSZ = 33
+-    DT_SYMTAB_SHNDX = 34
+-    DT_RELRSZ = 35
+-    DT_RELR = 36
+-    DT_RELRENT = 37
+-    DT_GNU_PRELINKED = 0x6ffffdf5
+-    DT_GNU_CONFLICTSZ = 0x6ffffdf6
+-    DT_GNU_LIBLISTSZ = 0x6ffffdf7
+-    DT_CHECKSUM = 0x6ffffdf8
+-    DT_PLTPADSZ = 0x6ffffdf9
+-    DT_MOVEENT = 0x6ffffdfa
+-    DT_MOVESZ = 0x6ffffdfb
+-    DT_FEATURE_1 = 0x6ffffdfc
+-    DT_POSFLAG_1 = 0x6ffffdfd
+-    DT_SYMINSZ = 0x6ffffdfe
+-    DT_SYMINENT = 0x6ffffdff
+-    DT_GNU_HASH = 0x6ffffef5
+-    DT_TLSDESC_PLT = 0x6ffffef6
+-    DT_TLSDESC_GOT = 0x6ffffef7
+-    DT_GNU_CONFLICT = 0x6ffffef8
+-    DT_GNU_LIBLIST = 0x6ffffef9
+-    DT_CONFIG = 0x6ffffefa
+-    DT_DEPAUDIT = 0x6ffffefb
+-    DT_AUDIT = 0x6ffffefc
+-    DT_PLTPAD = 0x6ffffefd
+-    DT_MOVETAB = 0x6ffffefe
+-    DT_SYMINFO = 0x6ffffeff
+-    DT_VERSYM = 0x6ffffff0
+-    DT_RELACOUNT = 0x6ffffff9
+-    DT_RELCOUNT = 0x6ffffffa
+-    DT_FLAGS_1 = 0x6ffffffb
+-    DT_VERDEF = 0x6ffffffc
+-    DT_VERDEFNUM = 0x6ffffffd
+-    DT_VERNEED = 0x6ffffffe
+-    DT_VERNEEDNUM = 0x6fffffff
+-    DT_AUXILIARY = 0x7ffffffd
+-    DT_FILTER = 0x7fffffff
+-
+-class DtAARCH64(enum.Enum):
++    prefix = 'DT_'
++class DtAARCH64(Dt):
+     """Supplemental DT_* constants for EM_AARCH64."""
+-    DT_AARCH64_BTI_PLT = 0x70000001
+-    DT_AARCH64_PAC_PLT = 0x70000003
+-    DT_AARCH64_VARIANT_PCS = 0x70000005
+-
+-class DtALPHA(enum.Enum):
++class DtALPHA(Dt):
+     """Supplemental DT_* constants for EM_ALPHA."""
+-    DT_ALPHA_PLTRO = 0x70000000
+-
+-class DtALTERA_NIOS2(enum.Enum):
++class DtALTERA_NIOS2(Dt):
+     """Supplemental DT_* constants for EM_ALTERA_NIOS2."""
+-    DT_NIOS2_GP = 0x70000002
+-
+-class DtIA_64(enum.Enum):
++class DtIA_64(Dt):
+     """Supplemental DT_* constants for EM_IA_64."""
+-    DT_IA_64_PLT_RESERVE = 0x70000000
+-
+-class DtMIPS(enum.Enum):
++class DtMIPS(Dt):
+     """Supplemental DT_* constants for EM_MIPS."""
+-    DT_MIPS_RLD_VERSION = 0x70000001
+-    DT_MIPS_TIME_STAMP = 0x70000002
+-    DT_MIPS_ICHECKSUM = 0x70000003
+-    DT_MIPS_IVERSION = 0x70000004
+-    DT_MIPS_FLAGS = 0x70000005
+-    DT_MIPS_BASE_ADDRESS = 0x70000006
+-    DT_MIPS_MSYM = 0x70000007
+-    DT_MIPS_CONFLICT = 0x70000008
+-    DT_MIPS_LIBLIST = 0x70000009
+-    DT_MIPS_LOCAL_GOTNO = 0x7000000a
+-    DT_MIPS_CONFLICTNO = 0x7000000b
+-    DT_MIPS_LIBLISTNO = 0x70000010
+-    DT_MIPS_SYMTABNO = 0x70000011
+-    DT_MIPS_UNREFEXTNO = 0x70000012
+-    DT_MIPS_GOTSYM = 0x70000013
+-    DT_MIPS_HIPAGENO = 0x70000014
+-    DT_MIPS_RLD_MAP = 0x70000016
+-    DT_MIPS_DELTA_CLASS = 0x70000017
+-    DT_MIPS_DELTA_CLASS_NO = 0x70000018
+-    DT_MIPS_DELTA_INSTANCE = 0x70000019
+-    DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a
+-    DT_MIPS_DELTA_RELOC = 0x7000001b
+-    DT_MIPS_DELTA_RELOC_NO = 0x7000001c
+-    DT_MIPS_DELTA_SYM = 0x7000001d
+-    DT_MIPS_DELTA_SYM_NO = 0x7000001e
+-    DT_MIPS_DELTA_CLASSSYM = 0x70000020
+-    DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021
+-    DT_MIPS_CXX_FLAGS = 0x70000022
+-    DT_MIPS_PIXIE_INIT = 0x70000023
+-    DT_MIPS_SYMBOL_LIB = 0x70000024
+-    DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025
+-    DT_MIPS_LOCAL_GOTIDX = 0x70000026
+-    DT_MIPS_HIDDEN_GOTIDX = 0x70000027
+-    DT_MIPS_PROTECTED_GOTIDX = 0x70000028
+-    DT_MIPS_OPTIONS = 0x70000029
+-    DT_MIPS_INTERFACE = 0x7000002a
+-    DT_MIPS_DYNSTR_ALIGN = 0x7000002b
+-    DT_MIPS_INTERFACE_SIZE = 0x7000002c
+-    DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d
+-    DT_MIPS_PERF_SUFFIX = 0x7000002e
+-    DT_MIPS_COMPACT_SIZE = 0x7000002f
+-    DT_MIPS_GP_VALUE = 0x70000030
+-    DT_MIPS_AUX_DYNAMIC = 0x70000031
+-    DT_MIPS_PLTGOT = 0x70000032
+-    DT_MIPS_RWPLT = 0x70000034
+-    DT_MIPS_RLD_MAP_REL = 0x70000035
+-    DT_MIPS_XHASH = 0x70000036
+-
+-class DtPPC(enum.Enum):
++class DtPPC(Dt):
+     """Supplemental DT_* constants for EM_PPC."""
+-    DT_PPC_GOT = 0x70000000
+-    DT_PPC_OPT = 0x70000001
+-
+-class DtPPC64(enum.Enum):
++class DtPPC64(Dt):
+     """Supplemental DT_* constants for EM_PPC64."""
+-    DT_PPC64_GLINK = 0x70000000
+-    DT_PPC64_OPD = 0x70000001
+-    DT_PPC64_OPDSZ = 0x70000002
+-    DT_PPC64_OPT = 0x70000003
+-
+-class DtRISCV(enum.Enum):
++class DtRISCV(Dt):
+     """Supplemental DT_* constants for EM_RISCV."""
+-    DT_RISCV_VARIANT_CC = 0x70000001
+-
+-class DtSPARC(enum.Enum):
++class DtSPARC(Dt):
+     """Supplemental DT_* constants for EM_SPARC."""
+-    DT_SPARC_REGISTER = 0x70000001
++_dt_skip = '''
++DT_ENCODING DT_PROCNUM
++DT_ADDRRNGLO DT_ADDRRNGHI DT_ADDRNUM
++DT_VALRNGLO DT_VALRNGHI DT_VALNUM
++DT_VERSIONTAGNUM DT_EXTRANUM
++DT_AARCH64_NUM
++DT_ALPHA_NUM
++DT_IA_64_NUM
++DT_MIPS_NUM
++DT_PPC_NUM
++DT_PPC64_NUM
++DT_SPARC_NUM
++'''.strip().split()
++_register_elf_h(DtAARCH64, prefix='DT_AARCH64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtALPHA, prefix='DT_ALPHA_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtALTERA_NIOS2, prefix='DT_NIOS2_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtIA_64, prefix='DT_IA_64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtMIPS, prefix='DT_MIPS_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtPPC, prefix='DT_PPC_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtPPC64, prefix='DT_PPC64_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtRISCV, prefix='DT_RISCV_', skip=_dt_skip, parent=Dt)
++_register_elf_h(DtSPARC, prefix='DT_SPARC_', skip=_dt_skip, parent=Dt)
++_register_elf_h(Dt, skip=_dt_skip, ranges=True)
++del _dt_skip
++
++# Constant extraction is complete.
++del _register_elf_h
++del _elf_h
+ 
+ class StInfo:
+     """ELF symbol binding and type.  Type of the Sym.st_info field."""
diff --git a/SOURCES/glibc-rh2109510-22.patch b/SOURCES/glibc-rh2109510-22.patch
new file mode 100644
index 0000000..e87b99f
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-22.patch
@@ -0,0 +1,34 @@
+commit d33705c0b020632274318323931695a99753b5be
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Nov 3 12:24:17 2022 +0100
+
+    scripts/glibcelf.py: Properly report <elf.h> parsing failures
+    
+    Without this change, parse failures result in an exception:
+    
+    Traceback (most recent call last):
+      File "tst-glibcelf.py", line 23, in <module>
+        import glibcelf
+      File "/path/to/git/scripts/glibcelf.py", line 226, in <module>
+        _elf_h = _parse_elf_h()
+      File "/path/to/git/scripts/glibcelf.py", line 221, in _parse_elf_h
+        result = glibcpp.macro_eval(glibcpp.macro_definitions(tokens), reporter)
+      File "/path/to/git/scripts/glibcpp.py", line 379, in macro_eval
+        reporter.error(md.line, 'macro {} redefined'.format(md.name))
+      File "/path/to/git/scripts/glibcelf.py", line 214, in error
+        errors += 1
+    UnboundLocalError: local variable 'errors' referenced before assignment
+
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 420cb21943b28bba..59aab56ecf9deb3e 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -211,7 +211,7 @@ def _parse_elf_h():
+             self.errors = 0
+ 
+         def error(self, line, message):
+-            errors += 1
++            self.errors += 1
+             print('{}:{}: error: {}'.format(path, line, message))
+ 
+         def note(self, line, message):
diff --git a/SOURCES/glibc-rh2109510-23.patch b/SOURCES/glibc-rh2109510-23.patch
new file mode 100644
index 0000000..7823014
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-23.patch
@@ -0,0 +1,108 @@
+Downstream-only adjustments to scripts/glibcelf.py.  We do not have
+CSKY nor RISC-V constants in <elf.h>, so glibcelf cannot extract
+those.  PT_AARCH64_* constants are missing as well.
+
+Adjust elf/tst-glibcelf.py to use PT_MIPS_OPTIONS instead of
+PT_AARCH64_MEMTAG_MTE for testing.  It has the same numeric value
+(0x70000002).
+
+diff --git a/elf/tst-glibcelf.py b/elf/tst-glibcelf.py
+index a5bff45eae55edea..9cb0861589d6ae2e 100644
+--- a/elf/tst-glibcelf.py
++++ b/elf/tst-glibcelf.py
+@@ -75,15 +75,17 @@ def check_basic():
+     if repr(glibcelf.Pt(17609)) != 'Pt(17609)':
+         error('repr(Pt(17609))')
+ 
+-    if glibcelf.Pt('PT_AARCH64_MEMTAG_MTE') \
+-       is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
+-        error('PT_AARCH64_MEMTAG_MTE identity')
+-    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
++    # Note: Upstream uses PT_AARCH64_MEMTAG_MTE instead of PT_MIPS_OPTIONS.
++    # PT_AARCH64_MEMTAG_MTE is not yet available downstream.
++    if glibcelf.Pt('PT_MIPS_OPTIONS') \
++       is not glibcelf.Pt.PT_MIPS_OPTIONS:
++        error('PT_MIPS_OPTIONS identity')
++    if glibcelf.Pt(0x70000002) is glibcelf.Pt.PT_MIPS_OPTIONS:
+         error('Pt(0x70000002) identity')
+-    if glibcelf.PtAARCH64(0x70000002) is not glibcelf.Pt.PT_AARCH64_MEMTAG_MTE:
+-        error('PtAARCH64(0x70000002) identity')
+-    if glibcelf.Pt.PT_AARCH64_MEMTAG_MTE.short_name != 'AARCH64_MEMTAG_MTE':
+-        error('PT_AARCH64_MEMTAG_MTE short name')
++    if glibcelf.PtMIPS(0x70000002) is not glibcelf.Pt.PT_MIPS_OPTIONS:
++        error('PtMIPS(0x70000002) identity')
++    if glibcelf.Pt.PT_MIPS_OPTIONS.short_name != 'MIPS_OPTIONS':
++        error('PT_MIPS_OPTIONS short name')
+ 
+     # Special cases for int-like Shn.
+     if glibcelf.Shn(32) == glibcelf.Shn.SHN_XINDEX:
+diff --git a/scripts/glibcelf.py b/scripts/glibcelf.py
+index 59aab56ecf9deb3e..5980d7cc906005e2 100644
+--- a/scripts/glibcelf.py
++++ b/scripts/glibcelf.py
+@@ -306,23 +306,17 @@ class ShtALPHA(Sht):
+     """Supplemental SHT_* constants for EM_ALPHA."""
+ class ShtARM(Sht):
+     """Supplemental SHT_* constants for EM_ARM."""
+-class ShtCSKY(Sht):
+-    """Supplemental SHT_* constants for EM_CSKY."""
+ class ShtIA_64(Sht):
+     """Supplemental SHT_* constants for EM_IA_64."""
+ class ShtMIPS(Sht):
+     """Supplemental SHT_* constants for EM_MIPS."""
+ class ShtPARISC(Sht):
+     """Supplemental SHT_* constants for EM_PARISC."""
+-class ShtRISCV(Sht):
+-    """Supplemental SHT_* constants for EM_RISCV."""
+ _register_elf_h(ShtALPHA, prefix='SHT_ALPHA_', parent=Sht)
+ _register_elf_h(ShtARM, prefix='SHT_ARM_', parent=Sht)
+-_register_elf_h(ShtCSKY, prefix='SHT_CSKY_', parent=Sht)
+ _register_elf_h(ShtIA_64, prefix='SHT_IA_64_', parent=Sht)
+ _register_elf_h(ShtMIPS, prefix='SHT_MIPS_', parent=Sht)
+ _register_elf_h(ShtPARISC, prefix='SHT_PARISC_', parent=Sht)
+-_register_elf_h(ShtRISCV, prefix='SHT_RISCV_', parent=Sht)
+ _register_elf_h(Sht, ranges=True,
+                 skip='SHT_LOSUNW SHT_HISUNW SHT_LOUSER SHT_HIUSER'.split())
+ 
+@@ -392,8 +386,6 @@ _register_elf_h(Stt, ranges=True)
+ class Pt(_TypedConstant):
+     """ELF program header types.  Type of Phdr.p_type."""
+     prefix = 'PT_'
+-class PtAARCH64(Pt):
+-    """Supplemental PT_* constants for EM_AARCH64."""
+ class PtARM(Pt):
+     """Supplemental PT_* constants for EM_ARM."""
+ class PtHP(Pt):
+@@ -404,15 +396,11 @@ class PtMIPS(Pt):
+     """Supplemental PT_* constants for EM_MIPS."""
+ class PtPARISC(Pt):
+     """Supplemental PT_* constants for EM_PARISC."""
+-class PtRISCV(Pt):
+-    """Supplemental PT_* constants for EM_RISCV."""
+-_register_elf_h(PtAARCH64, prefix='PT_AARCH64_', parent=Pt)
+ _register_elf_h(PtARM, prefix='PT_ARM_', parent=Pt)
+ _register_elf_h(PtHP, prefix='PT_HP_', parent=Pt)
+ _register_elf_h(PtIA_64, prefix='PT_IA_64_', parent=Pt)
+ _register_elf_h(PtMIPS, prefix='PT_MIPS_', parent=Pt)
+ _register_elf_h(PtPARISC, prefix='PT_PARISC_', parent=Pt)
+-_register_elf_h(PtRISCV, prefix='PT_RISCV_', parent=Pt)
+ _register_elf_h(Pt, skip='PT_LOSUNW PT_HISUNW'.split(), ranges=True)
+ 
+ class Dt(_TypedConstant):
+@@ -432,8 +420,6 @@ class DtPPC(Dt):
+     """Supplemental DT_* constants for EM_PPC."""
+ class DtPPC64(Dt):
+     """Supplemental DT_* constants for EM_PPC64."""
+-class DtRISCV(Dt):
+-    """Supplemental DT_* constants for EM_RISCV."""
+ class DtSPARC(Dt):
+     """Supplemental DT_* constants for EM_SPARC."""
+ _dt_skip = '''
+@@ -456,7 +442,6 @@ _register_elf_h(DtIA_64, prefix='DT_IA_64_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtMIPS, prefix='DT_MIPS_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtPPC, prefix='DT_PPC_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtPPC64, prefix='DT_PPC64_', skip=_dt_skip, parent=Dt)
+-_register_elf_h(DtRISCV, prefix='DT_RISCV_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(DtSPARC, prefix='DT_SPARC_', skip=_dt_skip, parent=Dt)
+ _register_elf_h(Dt, skip=_dt_skip, ranges=True)
+ del _dt_skip
diff --git a/SOURCES/glibc-rh2109510-3.patch b/SOURCES/glibc-rh2109510-3.patch
new file mode 100644
index 0000000..59496a7
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-3.patch
@@ -0,0 +1,32 @@
+commit 7b36d26b22d147ffc347f427f9fd584700578a94
+Author: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Date:   Mon Dec 3 14:40:48 2018 +0100
+
+    Fix test-as-const-jmp_buf-ssp.c generation on gnu-i386
+    
+    hurd's jmp_buf-ssp.sym does not define any symbol.
+    scripts/gen-as-const.py currently was emitting an empty line in that
+    case, and the gawk invocation was prepending "asconst_" to it, ending up
+    with:
+    
+    .../build/glibc/setjmp/test-as-const-jmp_buf-ssp.c:1:2: error: expected « = », « , », « ; », « asm » or
+    « __attribute__ » at end of input
+        1 |  asconst_
+          |  ^~~~~~~~
+    
+            * scripts/gen-as-const.py (main): Avoid emitting empty line when
+            there is no element in `consts'.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index b7a5744bb192dd67..cabf401ed15e8367 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -153,7 +153,7 @@ def main():
+         print(gen_test(sym_data))
+     else:
+         consts = compute_c_consts(sym_data, args.cc)
+-        print('\n'.join('#define %s %s' % c for c in sorted(consts.items())))
++        print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
+ 
+ if __name__ == '__main__':
+     main()
diff --git a/SOURCES/glibc-rh2109510-4.patch b/SOURCES/glibc-rh2109510-4.patch
new file mode 100644
index 0000000..a56943a
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-4.patch
@@ -0,0 +1,157 @@
+commit 477a02f63751c4b759ddd9454d17f2a7ad120ee3
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 3 22:08:50 2018 +0000
+
+    Make gen-as-const.py handle '--' consistently with awk script.
+    
+    It was reported in
+    <https://sourceware.org/ml/libc-alpha/2018-12/msg00045.html> that
+    gen-as-const.py fails to generate test code in the case where a .sym
+    file has no symbols in it, so resulting in a test failing to link for
+    Hurd.
+    
+    The relevant difference from the old awk script is that the old script
+    treated '--' lines as indicating that the text to do at the start of
+    the test (or file used to compute constants) should be output at that
+    point if not already output, as well as treating lines with actual
+    entries for constants like that.  This patch changes gen-as-const.py
+    accordingly, making it the sole responsibility of the code parsing
+    .sym files to determine when such text should be output and ensuring
+    it's always output at some point even if there are no symbols and no
+    '--' lines, since not outputting it means the test fails to link.
+    Handling '--' like that also avoids any problems that would arise if
+    the first entry for a symbol were inside #ifdef (since the text in
+    question must not be output inside #ifdef).
+    
+    Tested for x86_64, and with build-many-glibcs.py for i686-gnu.  Note
+    that there are still compilation test failures for i686-gnu
+    (linknamespace tests, possibly arising from recent posix_spawn-related
+    changes).
+    
+            * scripts/gen-as-const.py (compute_c_consts): Take an argument
+            'START' to indicate that start text should be output.
+            (gen_test): Likewise.
+            (main): Generate 'START' for first symbol or '--' line, or at end
+            of input if not previously generated.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index cabf401ed15e8367..eb85ef1aa0f4934d 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -34,28 +34,28 @@ def compute_c_consts(sym_data, cc):
+     """Compute the values of some C constants.
+ 
+     The first argument is a list whose elements are either strings
+-    (preprocessor directives) or pairs of strings (a name and a C
++    (preprocessor directives, or the special string 'START' to
++    indicate this function should insert its initial boilerplate text
++    in the output there) or pairs of strings (a name and a C
+     expression for the corresponding value).  Preprocessor directives
+     in the middle of the list may be used to select which constants
+     end up being evaluated using which expressions.
+ 
+     """
+     out_lines = []
+-    started = False
+     for arg in sym_data:
+         if isinstance(arg, str):
+-            out_lines.append(arg)
++            if arg == 'START':
++                out_lines.append('void\ndummy (void)\n{')
++            else:
++                out_lines.append(arg)
+             continue
+         name = arg[0]
+         value = arg[1]
+-        if not started:
+-            out_lines.append('void\ndummy (void)\n{')
+-            started = True
+         out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
+                          ': : \"i\" ((long int) (%s)));'
+                          % (name, value))
+-    if started:
+-        out_lines.append('}')
++    out_lines.append('}')
+     out_lines.append('')
+     out_text = '\n'.join(out_lines)
+     with tempfile.TemporaryDirectory() as temp_dir:
+@@ -89,32 +89,32 @@ def gen_test(sym_data):
+ 
+     """
+     out_lines = []
+-    started = False
+     for arg in sym_data:
+         if isinstance(arg, str):
+-            out_lines.append(arg)
++            if arg == 'START':
++                out_lines.append('#include <stdint.h>\n'
++                                 '#include <stdio.h>\n'
++                                 '#include <bits/wordsize.h>\n'
++                                 '#if __WORDSIZE == 64\n'
++                                 'typedef uint64_t c_t;\n'
++                                 '# define U(n) UINT64_C (n)\n'
++                                 '#else\n'
++                                 'typedef uint32_t c_t;\n'
++                                 '# define U(n) UINT32_C (n)\n'
++                                 '#endif\n'
++                                 'static int\n'
++                                 'do_test (void)\n'
++                                 '{\n'
++                                 # Compilation test only, using static
++                                 # assertions.
++                                 '  return 0;\n'
++                                 '}\n'
++                                 '#include <support/test-driver.c>')
++            else:
++                out_lines.append(arg)
+             continue
+         name = arg[0]
+         value = arg[1]
+-        if not started:
+-            out_lines.append('#include <stdint.h>\n'
+-                             '#include <stdio.h>\n'
+-                             '#include <bits/wordsize.h>\n'
+-                             '#if __WORDSIZE == 64\n'
+-                             'typedef uint64_t c_t;\n'
+-                             '# define U(n) UINT64_C (n)\n'
+-                             '#else\n'
+-                             'typedef uint32_t c_t;\n'
+-                             '# define U(n) UINT32_C (n)\n'
+-                             '#endif\n'
+-                             'static int\n'
+-                             'do_test (void)\n'
+-                             '{\n'
+-                             # Compilation test only, using static assertions.
+-                             '  return 0;\n'
+-                             '}\n'
+-                             '#include <support/test-driver.c>')
+-            started = True
+         out_lines.append('_Static_assert (U (asconst_%s) == (c_t) (%s), '
+                          '"value of %s");'
+                          % (name, value, name))
+@@ -134,6 +134,7 @@ def main():
+     args = parser.parse_args()
+     sym_data = []
+     with open(args.sym_file, 'r') as sym_file:
++        started = False
+         for line in sym_file:
+             line = line.strip()
+             if line == '':
+@@ -143,12 +144,17 @@ def main():
+                 sym_data.append(line)
+                 continue
+             words = line.split(maxsplit=1)
++            if not started:
++                sym_data.append('START')
++                started = True
+             # Separator.
+             if words[0] == '--':
+                 continue
+             name = words[0]
+             value = words[1] if len(words) > 1 else words[0]
+             sym_data.append((name, value))
++        if not started:
++            sym_data.append('START')
+     if args.test:
+         print(gen_test(sym_data))
+     else:
diff --git a/SOURCES/glibc-rh2109510-5.patch b/SOURCES/glibc-rh2109510-5.patch
new file mode 100644
index 0000000..3e93b78
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-5.patch
@@ -0,0 +1,483 @@
+commit a8110b727e508f7ddf34f940af622e6f95435201
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 10 22:27:13 2018 +0000
+
+    Move tst-signal-numbers to Python.
+    
+    This patch converts the tst-signal-numbers test from shell + awk to
+    Python.
+    
+    As with gen-as-const, the point is not so much that shell and awk are
+    problematic for this code, as that it's useful to build up general
+    infrastructure in Python for use of a range of code involving
+    extracting values from C headers.  This patch moves some code from
+    gen-as-const.py to a new glibcextract.py, which also gains functions
+    relating to listing macros, and comparing the values of a set of
+    macros from compiling two different pieces of code.
+    
+    It's not just signal numbers that should have such tests; pretty much
+    any case where glibc copies constants from Linux kernel headers should
+    have such tests that the values and sets of constants agree except
+    where differences are known to be OK.  Much the same also applies to
+    structure layouts (although testing those without hardcoding lists of
+    fields to test will be more complicated).
+    
+    Given this patch, another test for a set of macros would essentially
+    be just a call to glibcextract.compare_macro_consts (plus boilerplate
+    code - and we could move to having separate text files defining such
+    tests, like the .sym inputs to gen-as-const, so that only a single
+    Python script is needed for most such tests).  Some such tests would
+    of course need new features, e.g. where the set of macros changes in
+    new kernel versions (so you need to allow new macro names on the
+    kernel side if the kernel headers are newer than the version known to
+    glibc, and extra macros on the glibc side if the kernel headers are
+    older).  tst-syscall-list.sh could become a Python script that uses
+    common code to generate lists of macros but does other things with its
+    own custom logic.
+    
+    There are a few differences from the existing shell + awk test.
+    Because the new test evaluates constants using the compiler, no
+    special handling is needed any more for one signal name being defined
+    to another.  Because asm/signal.h now needs to pass through the
+    compiler, not just the preprocessor, stddef.h is included as well
+    (given the asm/signal.h issue that it requires an externally provided
+    definition of size_t).  The previous code defined __ASSEMBLER__ with
+    asm/signal.h; this is removed (__ASSEMBLY__, a different macro,
+    eliminates the requirement for stddef.h on some but not all
+    architectures).
+    
+    Tested for x86_64, and with build-many-glibcs.py.
+    
+            * scripts/glibcextract.py: New file.
+            * scripts/gen-as-const.py: Do not import os.path, re, subprocess
+            or tempfile.  Import glibcextract.
+            (compute_c_consts): Remove.  Moved to glibcextract.py.
+            (gen_test): Update reference to compute_c_consts.
+            (main): Likewise.
+            * sysdeps/unix/sysv/linux/tst-signal-numbers.py: New file.
+            * sysdeps/unix/sysv/linux/tst-signal-numbers.sh: Remove.
+            * sysdeps/unix/sysv/linux/Makefile
+            ($(objpfx)tst-signal-numbers.out): Use tst-signal-numbers.py.
+            Redirect stderr as well as stdout.
+
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index eb85ef1aa0f4934d..f85e359394acb1a4 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -24,68 +24,14 @@
+ # A line giving just a name implies an expression consisting of just that name.
+ 
+ import argparse
+-import os.path
+-import re
+-import subprocess
+-import tempfile
+ 
+-
+-def compute_c_consts(sym_data, cc):
+-    """Compute the values of some C constants.
+-
+-    The first argument is a list whose elements are either strings
+-    (preprocessor directives, or the special string 'START' to
+-    indicate this function should insert its initial boilerplate text
+-    in the output there) or pairs of strings (a name and a C
+-    expression for the corresponding value).  Preprocessor directives
+-    in the middle of the list may be used to select which constants
+-    end up being evaluated using which expressions.
+-
+-    """
+-    out_lines = []
+-    for arg in sym_data:
+-        if isinstance(arg, str):
+-            if arg == 'START':
+-                out_lines.append('void\ndummy (void)\n{')
+-            else:
+-                out_lines.append(arg)
+-            continue
+-        name = arg[0]
+-        value = arg[1]
+-        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
+-                         ': : \"i\" ((long int) (%s)));'
+-                         % (name, value))
+-    out_lines.append('}')
+-    out_lines.append('')
+-    out_text = '\n'.join(out_lines)
+-    with tempfile.TemporaryDirectory() as temp_dir:
+-        c_file_name = os.path.join(temp_dir, 'test.c')
+-        s_file_name = os.path.join(temp_dir, 'test.s')
+-        with open(c_file_name, 'w') as c_file:
+-            c_file.write(out_text)
+-        # Compilation has to be from stdin to avoid the temporary file
+-        # name being written into the generated dependencies.
+-        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
+-        subprocess.check_call(cmd, shell=True)
+-        consts = {}
+-        with open(s_file_name, 'r') as s_file:
+-            for line in s_file:
+-                match = re.search('@@@name@@@([^@]*)'
+-                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
+-                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
+-                if match:
+-                    if (match.group(1) in consts
+-                        and match.group(2) != consts[match.group(1)]):
+-                        raise ValueError('duplicate constant %s'
+-                                         % match.group(1))
+-                    consts[match.group(1)] = match.group(2)
+-        return consts
++import glibcextract
+ 
+ 
+ def gen_test(sym_data):
+     """Generate a test for the values of some C constants.
+ 
+-    The first argument is as for compute_c_consts.
++    The first argument is as for glibcextract.compute_c_consts.
+ 
+     """
+     out_lines = []
+@@ -158,7 +104,7 @@ def main():
+     if args.test:
+         print(gen_test(sym_data))
+     else:
+-        consts = compute_c_consts(sym_data, args.cc)
++        consts = glibcextract.compute_c_consts(sym_data, args.cc)
+         print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
+ 
+ if __name__ == '__main__':
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+new file mode 100644
+index 0000000000000000..ecc4d5b6cc387c7d
+--- /dev/null
++++ b/scripts/glibcextract.py
+@@ -0,0 +1,162 @@
++#!/usr/bin/python3
++# Extract information from C headers.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import os.path
++import re
++import subprocess
++import tempfile
++
++
++def compute_c_consts(sym_data, cc):
++    """Compute the values of some C constants.
++
++    The first argument is a list whose elements are either strings
++    (preprocessor directives, or the special string 'START' to
++    indicate this function should insert its initial boilerplate text
++    in the output there) or pairs of strings (a name and a C
++    expression for the corresponding value).  Preprocessor directives
++    in the middle of the list may be used to select which constants
++    end up being evaluated using which expressions.
++
++    """
++    out_lines = []
++    for arg in sym_data:
++        if isinstance(arg, str):
++            if arg == 'START':
++                out_lines.append('void\ndummy (void)\n{')
++            else:
++                out_lines.append(arg)
++            continue
++        name = arg[0]
++        value = arg[1]
++        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++                         ': : \"i\" ((long int) (%s)));'
++                         % (name, value))
++    out_lines.append('}')
++    out_lines.append('')
++    out_text = '\n'.join(out_lines)
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        s_file_name = os.path.join(temp_dir, 'test.s')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(out_text)
++        # Compilation has to be from stdin to avoid the temporary file
++        # name being written into the generated dependencies.
++        cmd = ('%s -S -o %s -x c - < %s' % (cc, s_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        consts = {}
++        with open(s_file_name, 'r') as s_file:
++            for line in s_file:
++                match = re.search('@@@name@@@([^@]*)'
++                                  '@@@value@@@[^0-9Xxa-fA-F-]*'
++                                  '([0-9Xxa-fA-F-]+).*@@@end@@@', line)
++                if match:
++                    if (match.group(1) in consts
++                        and match.group(2) != consts[match.group(1)]):
++                        raise ValueError('duplicate constant %s'
++                                         % match.group(1))
++                    consts[match.group(1)] = match.group(2)
++        return consts
++
++
++def list_macros(source_text, cc):
++    """List the preprocessor macros defined by the given source code.
++
++    The return value is a pair of dicts, the first one mapping macro
++    names to their expansions and the second one mapping macro names
++    to lists of their arguments, or to None for object-like macros.
++
++    """
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        i_file_name = os.path.join(temp_dir, 'test.i')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(source_text)
++        cmd = ('%s -E -dM -o %s %s' % (cc, i_file_name, c_file_name))
++        subprocess.check_call(cmd, shell=True)
++        macros_exp = {}
++        macros_args = {}
++        with open(i_file_name, 'r') as i_file:
++            for line in i_file:
++                match = re.fullmatch('#define ([0-9A-Za-z_]+)(.*)\n', line)
++                if not match:
++                    raise ValueError('bad -dM output line: %s' % line)
++                name = match.group(1)
++                value = match.group(2)
++                if value.startswith(' '):
++                    value = value[1:]
++                    args = None
++                elif value.startswith('('):
++                    match = re.fullmatch(r'\((.*?)\) (.*)', value)
++                    if not match:
++                        raise ValueError('bad -dM output line: %s' % line)
++                    args = match.group(1).split(',')
++                    value = match.group(2)
++                else:
++                    raise ValueError('bad -dM output line: %s' % line)
++                if name in macros_exp:
++                    raise ValueError('duplicate macro: %s' % line)
++                macros_exp[name] = value
++                macros_args[name] = args
++    return macros_exp, macros_args
++
++
++def compute_macro_consts(source_text, cc, macro_re, exclude_re=None):
++    """Compute the integer constant values of macros defined by source_text.
++
++    Macros must match the regular expression macro_re, and if
++    exclude_re is defined they must not match exclude_re.  Values are
++    computed with compute_c_consts.
++
++    """
++    macros_exp, macros_args = list_macros(source_text, cc)
++    macros_set = {m for m in macros_exp
++                  if (macros_args[m] is None
++                      and re.fullmatch(macro_re, m)
++                      and (exclude_re is None
++                           or not re.fullmatch(exclude_re, m)))}
++    sym_data = [source_text, 'START']
++    sym_data.extend(sorted((m, m) for m in macros_set))
++    return compute_c_consts(sym_data, cc)
++
++
++def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
++    """Compare the values of macros defined by two different sources.
++
++    The sources would typically be includes of a glibc header and a
++    kernel header.  Return 1 if there were any differences, 0 if the
++    macro values were the same.
++
++    """
++    macros_1 = compute_macro_consts(source_1, cc, macro_re, exclude_re)
++    macros_2 = compute_macro_consts(source_2, cc, macro_re, exclude_re)
++    if macros_1 == macros_2:
++        return 0
++    print('First source:\n%s\n' % source_1)
++    print('Second source:\n%s\n' % source_2)
++    for name, value in sorted(macros_1.items()):
++        if name not in macros_2:
++            print('Only in first source: %s' % name)
++        elif macros_1[name] != macros_2[name]:
++            print('Different values for %s: %s != %s'
++                  % (name, macros_1[name], macros_2[name]))
++    for name in sorted(macros_2.keys()):
++        if name not in macros_1:
++            print('Only in second source: %s' % name)
++    return 1
+diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
+index bb055f9d6b841ff5..9c10ee53b26e1b1b 100644
+--- a/sysdeps/unix/sysv/linux/Makefile
++++ b/sysdeps/unix/sysv/linux/Makefile
+@@ -113,11 +113,14 @@ tests-special += $(objpfx)tst-signal-numbers.out
+ # in this context, but signal.c includes signal.h and not much else so it'll
+ # be conservatively correct.
+ $(objpfx)tst-signal-numbers.out: \
+-		../sysdeps/unix/sysv/linux/tst-signal-numbers.sh \
++		../sysdeps/unix/sysv/linux/tst-signal-numbers.py \
+ 		$(objpfx)signal.o*
+-	AWK=$(AWK) $(SHELL) ../sysdeps/unix/sysv/linux/tst-signal-numbers.sh \
+-	$(CC) $(patsubst -DMODULE_NAME=%,-DMODULE_NAME=testsuite,$(CPPFLAGS)) \
+-	< /dev/null > $@; $(evaluate-test)
++	PYTHONPATH=../scripts \
++	$(PYTHON) ../sysdeps/unix/sysv/linux/tst-signal-numbers.py \
++		   --cc="$(CC) $(patsubst -DMODULE_NAME=%, \
++					  -DMODULE_NAME=testsuite, \
++					  $(CPPFLAGS))" \
++	< /dev/null > $@ 2>&1; $(evaluate-test)
+ endif
+ 
+ ifeq ($(subdir),socket)
+diff --git a/sysdeps/unix/sysv/linux/tst-signal-numbers.py b/sysdeps/unix/sysv/linux/tst-signal-numbers.py
+new file mode 100644
+index 0000000000000000..48c63d1218e8303d
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/tst-signal-numbers.py
+@@ -0,0 +1,48 @@
++#!/usr/bin/python3
++# Test that glibc's signal numbers match the kernel's.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import argparse
++import sys
++
++import glibcextract
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Test that glibc's signal numbers match the kernel's.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++    sys.exit(glibcextract.compare_macro_consts(
++        '#define _GNU_SOURCE 1\n'
++        '#include <signal.h>\n',
++        '#define _GNU_SOURCE 1\n'
++        '#include <stddef.h>\n'
++        '#include <asm/signal.h>\n',
++        args.cc,
++        # Filter out constants that aren't signal numbers.
++        'SIG[A-Z]+',
++        # Discard obsolete signal numbers and unrelated constants:
++        #    SIGCLD, SIGIOT, SIGSWI, SIGUNUSED.
++        #    SIGSTKSZ, SIGRTMIN, SIGRTMAX.
++        'SIG(CLD|IOT|RT(MIN|MAX)|STKSZ|SWI|UNUSED)'))
++
++if __name__ == '__main__':
++    main()
+diff --git a/sysdeps/unix/sysv/linux/tst-signal-numbers.sh b/sysdeps/unix/sysv/linux/tst-signal-numbers.sh
+deleted file mode 100644
+index e1f7be0337c720a6..0000000000000000
+--- a/sysdeps/unix/sysv/linux/tst-signal-numbers.sh
++++ /dev/null
+@@ -1,86 +0,0 @@
+-#! /bin/sh
+-# Test that glibc's signal numbers match the kernel's.
+-# Copyright (C) 2017-2018 Free Software Foundation, Inc.
+-# This file is part of the GNU C Library.
+-
+-# The GNU C Library is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU Lesser General Public
+-# License as published by the Free Software Foundation; either
+-# version 2.1 of the License, or (at your option) any later version.
+-
+-# The GNU C Library is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+-# Lesser General Public License for more details.
+-
+-# You should have received a copy of the GNU Lesser General Public
+-# License along with the GNU C Library; if not, see
+-# <http://www.gnu.org/licenses/>.
+-
+-set -e
+-if [ -n "$BASH_VERSION" ]; then set -o pipefail; fi
+-LC_ALL=C; export LC_ALL
+-
+-# We cannot use Linux's asm/signal.h to define signal numbers, because
+-# it isn't sufficiently namespace-clean.  Instead, this test checks
+-# that our signal numbers match the kernel's.  This script expects
+-# "$@" to be $(CC) $(CPPFLAGS) as set by glibc's Makefiles, and $AWK
+-# to be set in the environment.
+-
+-# Before doing anything else, fail if the compiler doesn't work.
+-"$@" -E -xc -dM - < /dev/null > /dev/null
+-
+-tmpG=`mktemp -t signums_glibc.XXXXXXXXX`
+-tmpK=`mktemp -t signums_kernel.XXXXXXXXX`
+-trap "rm -f '$tmpG' '$tmpK'" 0
+-
+-# Filter out constants that aren't signal numbers.
+-# If SIGPOLL is defined as SIGIO, swap it around so SIGIO is defined as
+-# SIGPOLL. Similarly for SIGABRT and SIGIOT.
+-# Discard obsolete signal numbers and unrelated constants:
+-#    SIGCLD, SIGIOT, SIGSWI, SIGUNUSED.
+-#    SIGSTKSZ, SIGRTMIN, SIGRTMAX.
+-# Then sort the list.
+-filter_defines ()
+-{
+-    $AWK '
+-/^#define SIG[A-Z]+ ([0-9]+|SIG[A-Z0-9]+)$/ { signals[$2] = $3 }
+-END {
+-  if ("SIGPOLL" in signals && "SIGIO" in signals &&
+-      signals["SIGPOLL"] == "SIGIO") {
+-    signals["SIGPOLL"] = signals["SIGIO"]
+-    signals["SIGIO"] = "SIGPOLL"
+-  }
+-  if ("SIGABRT" in signals && "SIGIOT" in signals &&
+-      signals["SIGABRT"] == "SIGIOT") {
+-    signals["SIGABRT"] = signals["SIGIOT"]
+-    signals["SIGIOT"] = "SIGABRT"
+-  }
+-  for (sig in signals) {
+-    if (sig !~ /^SIG(CLD|IOT|RT(MIN|MAX)|STKSZ|SWI|UNUSED)$/) {
+-      printf("#define %s %s\n", sig, signals[sig])
+-    }
+-  }
+-}' | sort
+-}
+-
+-# $CC may contain command-line switches, so it should be word-split.
+-printf '%s' '#define _GNU_SOURCE 1
+-#include <signal.h>
+-' |
+-    "$@" -E -xc -dM - |
+-    filter_defines > "$tmpG"
+-
+-printf '%s' '#define _GNU_SOURCE 1
+-#define __ASSEMBLER__ 1
+-#include <asm/signal.h>
+-' |
+-    "$@" -E -xc -dM - |
+-    filter_defines > "$tmpK"
+-
+-if cmp -s "$tmpG" "$tmpK"; then
+-    exit 0
+-else
+-    diff -u "$tmpG" "$tmpK"
+-    exit 1
+-fi
diff --git a/SOURCES/glibc-rh2109510-6.patch b/SOURCES/glibc-rh2109510-6.patch
new file mode 100644
index 0000000..61251dc
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-6.patch
@@ -0,0 +1,98 @@
+Partial backport of:
+
+commit cb7be1590e9b18e272e72eb4e910a7ad06a53bd0
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 10 22:56:59 2018 +0000
+
+    Use gen-as-const.py to process .pysym files.
+    
+    This patch eliminates the gen-py-const.awk variant of gen-as-const,
+    switching to use of gen-as-const.py (with a new --python option) to
+    process .pysym files (i.e., to generate nptl_lock_constants.py), as
+    the syntax of those files is identical to that of .sym files.
+    
+    Note that the generated nptl_lock_constants.py is *not* identical to
+    the version generated by the awk script.  Apart from the trivial
+    changes (comment referencing the new script, and output being sorted),
+    the constants FUTEX_WAITERS, PTHREAD_MUTEXATTR_FLAG_BITS,
+    PTHREAD_MUTEXATTR_FLAG_PSHARED and PTHREAD_MUTEX_PRIO_CEILING_MASK are
+    now output as positive rather than negative constants (on x86_64
+    anyway; maybe not necessarily on 32-bit systems):
+    
+    < FUTEX_WAITERS = -2147483648
+    ---
+    > FUTEX_WAITERS = 2147483648
+    
+    < PTHREAD_MUTEXATTR_FLAG_BITS = -251662336
+    < PTHREAD_MUTEXATTR_FLAG_PSHARED = -2147483648
+    ---
+    > PTHREAD_MUTEXATTR_FLAG_BITS = 4043304960
+    > PTHREAD_MUTEXATTR_FLAG_PSHARED = 2147483648
+    
+    < PTHREAD_MUTEX_PRIO_CEILING_MASK = -524288
+    ---
+    > PTHREAD_MUTEX_PRIO_CEILING_MASK = 4294443008
+    
+    This is because gen-as-const has a cast of the constant value to long
+    int, which gen-py-const lacks.
+    
+    I think the positive values are more logically correct, since the
+    constants in question are in fact unsigned in C.  But to reliably
+    produce gen-as-const.py output for constants that always (in C and
+    Python) reflects the signedness of values with the high bit of "long
+    int" set would mean more complicated logic needs to be used in
+    computing values.
+    
+    The more correct positive values by themselves produce a failure of
+    nptl/test-mutexattr-printers, because masking with
+    ~PTHREAD_MUTEXATTR_FLAG_BITS & ~PTHREAD_MUTEX_NO_ELISION_NP now leaves
+    a bit -1 << 32 in the Python value, resulting in a KeyError exception.
+    To avoid that, places masking with ~ of one of the constants in
+    question are changed to mask with 0xffffffff as well (this reflects
+    how ~ in Python applies to an infinite-precision integer whereas ~ in
+    C does not do any promotions beyond the width of int).
+    
+    Tested for x86_64.
+    
+            * scripts/gen-as-const.py (main): Handle --python option.
+            * scripts/gen-py-const.awk: Remove.
+            * Makerules (py-const-script): Use gen-as-const.py.
+            ($(py-const)): Likewise.
+            * nptl/nptl-printers.py (MutexPrinter.read_status_no_robust): Mask
+            with 0xffffffff together with ~(PTHREAD_MUTEX_PRIO_CEILING_MASK).
+            (MutexAttributesPrinter.read_values): Mask with 0xffffffff
+            together with ~PTHREAD_MUTEXATTR_FLAG_BITS and
+            ~PTHREAD_MUTEX_NO_ELISION_NP.
+            * manual/README.pretty-printers: Update reference to
+            gen-py-const.awk.
+
+Only the gen-as-const.py changes are included downstream.  We keep using
+gen-py-const.awk for the build.
+
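+As a side note on the sign change discussed above: the flip to positive
+values comes from the (long int) cast in gen-as-const.  A minimal,
+standalone C sketch of the effect (illustration only, not part of this
+patch; assumes an LP64 target such as x86_64):
+
+  #include <stdio.h>
+
+  int
+  main (void)
+  {
+    /* FUTEX_WAITERS is 0x80000000, an unsigned value with the high bit set.  */
+    unsigned int futex_waiters = 0x80000000u;
+
+    /* With the widening cast used by gen-as-const.py, the value stays
+       positive because long int is 64 bits on this target.  */
+    printf ("%ld\n", (long int) futex_waiters);   /* 2147483648 */
+
+    /* Reinterpreted as a 32-bit int (roughly what the awk script ended up
+       emitting), the same bit pattern prints as a negative number.  */
+    printf ("%d\n", (int) futex_waiters);         /* -2147483648 */
+    return 0;
+  }
+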
+diff --git a/scripts/gen-as-const.py b/scripts/gen-as-const.py
+index f85e359394acb1a4..2f1dff092b98e044 100644
+--- a/scripts/gen-as-const.py
++++ b/scripts/gen-as-const.py
+@@ -75,6 +75,8 @@ def main():
+                         help='C compiler (including options) to use')
+     parser.add_argument('--test', action='store_true',
+                         help='Generate test case instead of header')
++    parser.add_argument('--python', action='store_true',
++                        help='Generate Python file instead of header')
+     parser.add_argument('sym_file',
+                         help='.sym file to process')
+     args = parser.parse_args()
+@@ -103,6 +105,13 @@ def main():
+             sym_data.append('START')
+     if args.test:
+         print(gen_test(sym_data))
++    elif args.python:
++        consts = glibcextract.compute_c_consts(sym_data, args.cc)
++        print('# GENERATED FILE\n'
++              '\n'
++              '# Constant definitions.\n'
++              '# See gen-as-const.py for details.\n')
++        print(''.join('%s = %s\n' % c for c in sorted(consts.items())), end='')
+     else:
+         consts = glibcextract.compute_c_consts(sym_data, args.cc)
+         print(''.join('#define %s %s\n' % c for c in sorted(consts.items())), end='')
diff --git a/SOURCES/glibc-rh2109510-7.patch b/SOURCES/glibc-rh2109510-7.patch
new file mode 100644
index 0000000..3da8337
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-7.patch
@@ -0,0 +1,178 @@
+commit df648905e7d8340bb3e78813fd25e2077b9685d9
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Dec 17 18:29:36 2018 +0000
+
+    Add test that MAP_* constants agree with kernel.
+    
+    Continuing the process of building up and using Python infrastructure
+    for extracting and using values in headers, this patch adds a test
+    that MAP_* constants from sys/mman.h agree with those in the Linux
+    kernel headers.  (Other sys/mman.h constants could be added to the
+    test separately.)
+    
+    This set of constants has grown over time, so the generic code is
+    enhanced to allow saying extra constants are OK on either side of the
+    comparison (where the caller sets those parameters based on the Linux
+    kernel headers version, compared with the version the headers were
+    last updated from).  Although the test is a custom Python file, my
+    intention is to move in future to a single Python script for such
+    tests and text files it takes as inputs, once there are enough
+    examples to provide a guide to the common cases in such tests (I'd
+    like to end up with most or all such sets of constants copied from
+    kernel headers having such tests, and likewise for structure layouts
+    from the kernel).
+    
+    The Makefile code is essentially the same as for tst-signal-numbers,
+    but I didn't try to find an object file to depend on to represent the
+    dependency on the headers used by the test (the conform/ tests don't
+    try to represent such header dependencies at all, for example).
+    
+    Tested with build-many-glibcs.py, and also for x86_64 with older
+    kernel headers.
+    
+            * scripts/glibcextract.py (compare_macro_consts): Take parameters
+            to allow extra macros from first or second sources.
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py: New file.
+            * sysdeps/unix/sysv/linux/Makefile [$(subdir) = misc]
+            (tests-special): Add $(objpfx)tst-mman-consts.out.
+            ($(objpfx)tst-mman-consts.out): New makefile target.
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index ecc4d5b6cc387c7d..06f712ad115e0f9e 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -136,12 +136,19 @@ def compute_macro_consts(source_text, cc, macro_re, exclude_re=None):
+     return compute_c_consts(sym_data, cc)
+ 
+ 
+-def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
++def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None,
++                         allow_extra_1=False, allow_extra_2=False):
+     """Compare the values of macros defined by two different sources.
+ 
+     The sources would typically be includes of a glibc header and a
+-    kernel header.  Return 1 if there were any differences, 0 if the
+-    macro values were the same.
++    kernel header.  If allow_extra_1, the first source may define
++    extra macros (typically if the kernel headers are older than the
++    version glibc has taken definitions from); if allow_extra_2, the
++    second source may define extra macros (typically if the kernel
++    headers are newer than the version glibc has taken definitions
++    from).  Return 1 if there were any differences other than those
++    allowed, 0 if the macro values were the same apart from any
++    allowed differences.
+ 
+     """
+     macros_1 = compute_macro_consts(source_1, cc, macro_re, exclude_re)
+@@ -150,13 +157,19 @@ def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None):
+         return 0
+     print('First source:\n%s\n' % source_1)
+     print('Second source:\n%s\n' % source_2)
++    ret = 0
+     for name, value in sorted(macros_1.items()):
+         if name not in macros_2:
+             print('Only in first source: %s' % name)
++            if not allow_extra_1:
++                ret = 1
+         elif macros_1[name] != macros_2[name]:
+             print('Different values for %s: %s != %s'
+                   % (name, macros_1[name], macros_2[name]))
++            ret = 1
+     for name in sorted(macros_2.keys()):
+         if name not in macros_1:
+             print('Only in second source: %s' % name)
+-    return 1
++            if not allow_extra_2:
++                ret = 1
++    return ret
+diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
+index 9c10ee53b26e1b1b..863ed80c2a2713d3 100644
+--- a/sysdeps/unix/sysv/linux/Makefile
++++ b/sysdeps/unix/sysv/linux/Makefile
+@@ -98,6 +98,15 @@ $(objpfx)tst-sysconf-iov_max: $(objpfx)tst-sysconf-iov_max-uapi.o
+ 
+ $(objpfx)tst-pkey: $(shared-thread-library)
+ 
++tests-special += $(objpfx)tst-mman-consts.out
++$(objpfx)tst-mman-consts.out: ../sysdeps/unix/sysv/linux/tst-mman-consts.py
++	PYTHONPATH=../scripts \
++	$(PYTHON) ../sysdeps/unix/sysv/linux/tst-mman-consts.py \
++		   --cc="$(CC) $(patsubst -DMODULE_NAME=%, \
++					  -DMODULE_NAME=testsuite, \
++					  $(CPPFLAGS))" \
++	< /dev/null > $@ 2>&1; $(evaluate-test)
++
+ endif # $(subdir) == misc
+ 
+ ifeq ($(subdir),time)
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+new file mode 100644
+index 0000000000000000..1a613beec0da16fb
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -0,0 +1,65 @@
++#!/usr/bin/python3
++# Test that glibc's sys/mman.h constants match the kernel's.
++# Copyright (C) 2018 Free Software Foundation, Inc.
++# This file is part of the GNU C Library.
++#
++# The GNU C Library is free software; you can redistribute it and/or
++# modify it under the terms of the GNU Lesser General Public
++# License as published by the Free Software Foundation; either
++# version 2.1 of the License, or (at your option) any later version.
++#
++# The GNU C Library is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++# Lesser General Public License for more details.
++#
++# You should have received a copy of the GNU Lesser General Public
++# License along with the GNU C Library; if not, see
++# <http://www.gnu.org/licenses/>.
++
++import argparse
++import sys
++
++import glibcextract
++
++
++def linux_kernel_version(cc):
++    """Return the (major, minor) version of the Linux kernel headers."""
++    sym_data = ['#include <linux/version.h>', 'START',
++                ('LINUX_VERSION_CODE', 'LINUX_VERSION_CODE')]
++    val = glibcextract.compute_c_consts(sym_data, cc)['LINUX_VERSION_CODE']
++    val = int(val)
++    return ((val & 0xff0000) >> 16, (val & 0xff00) >> 8)
++
++
++def main():
++    """The main entry point."""
++    parser = argparse.ArgumentParser(
++        description="Test that glibc's sys/mman.h constants "
++        "match the kernel's.")
++    parser.add_argument('--cc', metavar='CC',
++                        help='C compiler (including options) to use')
++    args = parser.parse_args()
++    linux_version_headers = linux_kernel_version(args.cc)
++    linux_version_glibc = (4, 19)
++    sys.exit(glibcextract.compare_macro_consts(
++        '#define _GNU_SOURCE 1\n'
++        '#include <sys/mman.h>\n',
++        '#define _GNU_SOURCE 1\n'
++        '#include <linux/mman.h>\n',
++        args.cc,
++        'MAP_.*',
++        # A series of MAP_HUGE_<size> macros are defined by the kernel
++        # but not by glibc.  MAP_UNINITIALIZED is kernel-only.
++        # MAP_FAILED is not a MAP_* flag and is glibc-only, as is the
++        # MAP_ANON alias for MAP_ANONYMOUS.  MAP_RENAME, MAP_AUTOGROW,
++        # MAP_LOCAL and MAP_AUTORSRV are in the kernel header for
++        # MIPS, marked as "not used by linux"; SPARC has MAP_INHERIT
++        # in the kernel header, but does not use it.
++        'MAP_HUGE_[0-9].*|MAP_UNINITIALIZED|MAP_FAILED|MAP_ANON'
++        '|MAP_RENAME|MAP_AUTOGROW|MAP_LOCAL|MAP_AUTORSRV|MAP_INHERIT',
++        linux_version_glibc > linux_version_headers,
++        linux_version_headers > linux_version_glibc))
++
++if __name__ == '__main__':
++    main()
diff --git a/SOURCES/glibc-rh2109510-8.patch b/SOURCES/glibc-rh2109510-8.patch
new file mode 100644
index 0000000..120abed
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-8.patch
@@ -0,0 +1,23 @@
+commit 46baeb61e16511f26db1b255e19dc9163f590367
+Author: Fangrui Song <maskray@google.com>
+Date:   Tue Oct 19 09:58:16 2021 -0700
+
+    glibcextract.py: Place un-assemblable @@@ in a comment
+    
+    Unlike GCC, Clang parses asm statements and verifies they are valid
+    instructions/directives. Place the magic @@@ into a comment to avoid
+    a parse error.
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index 06f712ad115e0f9e..8f2246aae6a9dfb7 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -45,7 +45,7 @@ def compute_c_consts(sym_data, cc):
+             continue
+         name = arg[0]
+         value = arg[1]
+-        out_lines.append('asm ("@@@name@@@%s@@@value@@@%%0@@@end@@@" '
++        out_lines.append('asm ("/* @@@name@@@%s@@@value@@@%%0@@@end@@@ */" '
+                          ': : \"i\" ((long int) (%s)));'
+                          % (name, value))
+     out_lines.append('}')
diff --git a/SOURCES/glibc-rh2109510-9.patch b/SOURCES/glibc-rh2109510-9.patch
new file mode 100644
index 0000000..289f6df
--- /dev/null
+++ b/SOURCES/glibc-rh2109510-9.patch
@@ -0,0 +1,45 @@
+commit 841afa116e32b3c7195475769c26bf46fd870d32
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Wed Aug 10 16:24:06 2022 -0300
+
+    glibcextract.py: Add compile_c_snippet
+    
+    It might be used in tests to check whether a snippet builds with the
+    provided compiler and flags.
+    
+    Reviewed-by: Florian Weimer <fweimer@redhat.com>
+
+diff --git a/scripts/glibcextract.py b/scripts/glibcextract.py
+index 8f2246aae6a9dfb7..0fb50dc8f9c4f7f9 100644
+--- a/scripts/glibcextract.py
++++ b/scripts/glibcextract.py
+@@ -17,6 +17,7 @@
+ # License along with the GNU C Library; if not, see
+ # <http://www.gnu.org/licenses/>.
+ 
++import collections
+ import os.path
+ import re
+ import subprocess
+@@ -173,3 +174,21 @@ def compare_macro_consts(source_1, source_2, cc, macro_re, exclude_re=None,
+             if not allow_extra_2:
+                 ret = 1
+     return ret
++
++CompileResult = collections.namedtuple("CompileResult", "returncode output")
++
++def compile_c_snippet(snippet, cc, extra_cc_args=''):
++    """Compile and return whether the SNIPPET can be build with CC along
++       EXTRA_CC_ARGS compiler flags.  Return a CompileResult with RETURNCODE
++       being 0 for success, or the failure value and the compiler output.
++    """
++    with tempfile.TemporaryDirectory() as temp_dir:
++        c_file_name = os.path.join(temp_dir, 'test.c')
++        obj_file_name = os.path.join(temp_dir, 'test.o')
++        with open(c_file_name, 'w') as c_file:
++            c_file.write(snippet + '\n')
++        cmd = cc.split() + extra_cc_args.split() + ['-c', '-o', obj_file_name,
++                c_file_name]
++        r = subprocess.run(cmd, check=False, stdout=subprocess.PIPE,
++                stderr=subprocess.STDOUT)
++        return CompileResult(r.returncode, r.stdout)
diff --git a/SOURCES/glibc-rh2116938.patch b/SOURCES/glibc-rh2116938.patch
new file mode 100644
index 0000000..f642aba
--- /dev/null
+++ b/SOURCES/glibc-rh2116938.patch
@@ -0,0 +1,449 @@
+1. Added "$(objpfx)tst-cmsghdr: $(libdl)" to socket/Makefile since we still
+   need $(libdl) in RHEL8.
+
+2. Included stddef.h in socket/tst-cmsghdr-skeleton.c because it uses NULL.
+
+commit 9c443ac4559a47ed99859bd80d14dc4b6dd220a1
+Author: Arjun Shankar <arjun@redhat.com>
+Date:   Tue Aug 2 11:10:25 2022 +0200
+
+    socket: Check lengths before advancing pointer in CMSG_NXTHDR
+    
+    The inline and library functions that the CMSG_NXTHDR macro may expand
+    to increment the pointer to the header before checking the stride of
+    the increment against available space.  Since C only allows incrementing
+    pointers to one past the end of an array, the increment must be done
+    after a length check.  This commit fixes that and includes a regression
+    test for CMSG_FIRSTHDR and CMSG_NXTHDR.
+    
+    The Linux, Hurd, and generic headers are all changed.
+    
+    Tested on Linux on armv7hl, i686, x86_64, aarch64, ppc64le, and s390x.
+    
+    [BZ #28846]
+    
+    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+Conflicts:
+	socket/Makefile
+	  (usual test backport differences)
+
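+For reference, application code walks ancillary data with a loop like the
+one below (an illustrative sketch, not part of this patch).  The loop is
+only safe if CMSG_NXTHDR validates cmsg_len *before* advancing the
+pointer, which is exactly what this change enforces:
+
+  #include <sys/socket.h>
+
+  /* Count the SCM_RIGHTS control messages attached to a received msghdr.  */
+  static int
+  count_fd_messages (struct msghdr *msg)
+  {
+    int count = 0;
+    for (struct cmsghdr *cmsg = CMSG_FIRSTHDR (msg); cmsg != NULL;
+         cmsg = CMSG_NXTHDR (msg, cmsg))
+      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
+        ++count;
+    return count;
+  }
+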
+diff --git a/bits/socket.h b/bits/socket.h
+index 725798882e4b803b..0474613a9c003eeb 100644
+--- a/bits/socket.h
++++ b/bits/socket.h
+@@ -245,6 +245,12 @@ struct cmsghdr
+ 			 + CMSG_ALIGN (sizeof (struct cmsghdr)))
+ #define CMSG_LEN(len)   (CMSG_ALIGN (sizeof (struct cmsghdr)) + (len))
+ 
++/* Given a length, return the additional padding necessary such that
++   len + __CMSG_PADDING(len) == CMSG_ALIGN (len).  */
++#define __CMSG_PADDING(len) ((sizeof (size_t) \
++                              - ((len) & (sizeof (size_t) - 1))) \
++                             & (sizeof (size_t) - 1))
++
+ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ 				      struct cmsghdr *__cmsg) __THROW;
+ #ifdef __USE_EXTERN_INLINES
+@@ -254,18 +260,38 @@ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ _EXTERN_INLINE struct cmsghdr *
+ __NTH (__cmsg_nxthdr (struct msghdr *__mhdr, struct cmsghdr *__cmsg))
+ {
++  /* We may safely assume that __cmsg lies between __mhdr->msg_control and
++     __mhdr->msg_controllen because the user is required to obtain the first
++     cmsg via CMSG_FIRSTHDR, set its length, then obtain subsequent cmsgs
++     via CMSG_NXTHDR, setting lengths along the way.  However, we don't yet
++     trust the value of __cmsg->cmsg_len and therefore do not use it in any
++     pointer arithmetic until we check its value.  */
++
++  unsigned char * __msg_control_ptr = (unsigned char *) __mhdr->msg_control;
++  unsigned char * __cmsg_ptr = (unsigned char *) __cmsg;
++
++  size_t __size_needed = sizeof (struct cmsghdr)
++                         + __CMSG_PADDING (__cmsg->cmsg_len);
++
++  /* The current header is malformed, too small to be a full header.  */
+   if ((size_t) __cmsg->cmsg_len < sizeof (struct cmsghdr))
+-    /* The kernel header does this so there may be a reason.  */
+     return (struct cmsghdr *) 0;
+ 
++  /* There isn't enough space between __cmsg and the end of the buffer to
++  hold the current cmsg *and* the next one.  */
++  if (((size_t)
++         (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr)
++       < __size_needed)
++      || ((size_t)
++            (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr
++             - __size_needed)
++          < __cmsg->cmsg_len))
++
++    return (struct cmsghdr *) 0;
++
++  /* Now, we trust cmsg_len and can use it to find the next header.  */
+   __cmsg = (struct cmsghdr *) ((unsigned char *) __cmsg
+ 			       + CMSG_ALIGN (__cmsg->cmsg_len));
+-  if ((unsigned char *) (__cmsg + 1) > ((unsigned char *) __mhdr->msg_control
+-					+ __mhdr->msg_controllen)
+-      || ((unsigned char *) __cmsg + CMSG_ALIGN (__cmsg->cmsg_len)
+-	  > ((unsigned char *) __mhdr->msg_control + __mhdr->msg_controllen)))
+-    /* No more entries.  */
+-    return (struct cmsghdr *) 0;
+   return __cmsg;
+ }
+ #endif	/* Use `extern inline'.  */
+diff --git a/socket/Makefile b/socket/Makefile
+index 8975a65c2aabbfbc..a445383f8739351e 100644
+--- a/socket/Makefile
++++ b/socket/Makefile
+@@ -31,7 +31,12 @@ routines := accept bind connect getpeername getsockname getsockopt	\
+ 	    setsockopt shutdown socket socketpair isfdtype opensock	\
+ 	    sockatmark accept4 recvmmsg sendmmsg sockaddr_un_set
+ 
+-tests := tst-accept4
++tests := \
++  tst-accept4 \
++  tst-cmsghdr \
++  # tests
++
++$(objpfx)tst-cmsghdr: $(libdl)
+ 
+ tests-internal := \
+   tst-sockaddr_un_set \
+diff --git a/socket/tst-cmsghdr-skeleton.c b/socket/tst-cmsghdr-skeleton.c
+new file mode 100644
+index 0000000000000000..7accfa6e54708e2a
+--- /dev/null
++++ b/socket/tst-cmsghdr-skeleton.c
+@@ -0,0 +1,93 @@
++/* Test ancillary data header creation.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++/* We use the preprocessor to generate the function/macro tests instead of
++   using indirection because having all the macro expansions alongside
++   each other lets the compiler warn us about suspicious pointer
++   arithmetic across subsequent CMSG_{FIRST,NXT}HDR expansions.  */
++
++#include <stdint.h>
++#include <stddef.h>
++
++#define RUN_TEST_CONCAT(suffix) run_test_##suffix
++#define RUN_TEST_FUNCNAME(suffix) RUN_TEST_CONCAT (suffix)
++
++static void
++RUN_TEST_FUNCNAME (CMSG_NXTHDR_IMPL) (void)
++{
++  struct msghdr m = {0};
++  struct cmsghdr *cmsg;
++  char cmsgbuf[3 * CMSG_SPACE (sizeof (PAYLOAD))] = {0};
++
++  m.msg_control = cmsgbuf;
++  m.msg_controllen = sizeof (cmsgbuf);
++
++  /* First header should point to the start of the buffer.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++
++  /* If the first header length consumes the entire buffer, there is no
++     space remaining for additional headers.  */
++  cmsg->cmsg_len = sizeof (cmsgbuf);
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg == NULL);
++
++  /* The first header length is so big, using it would cause an overflow.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++  cmsg->cmsg_len = SIZE_MAX;
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg == NULL);
++
++  /* The first header leaves just enough space to hold another header.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++  cmsg->cmsg_len = sizeof (cmsgbuf) - sizeof (struct cmsghdr);
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg != NULL);
++
++  /* The first header leaves space but not enough for another header.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++  cmsg->cmsg_len ++;
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg == NULL);
++
++  /* The second header leaves just enough space to hold another header.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++  cmsg->cmsg_len = CMSG_LEN (sizeof (PAYLOAD));
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg != NULL);
++  cmsg->cmsg_len = sizeof (cmsgbuf)
++                   - CMSG_SPACE (sizeof (PAYLOAD)) /* First header.  */
++                   - sizeof (struct cmsghdr);
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg != NULL);
++
++  /* The second header leaves space but not enough for another header.  */
++  cmsg = CMSG_FIRSTHDR (&m);
++  TEST_VERIFY_EXIT ((char *) cmsg == cmsgbuf);
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg != NULL);
++  cmsg->cmsg_len ++;
++  cmsg = CMSG_NXTHDR_IMPL (&m, cmsg);
++  TEST_VERIFY_EXIT (cmsg == NULL);
++
++  return;
++}
+diff --git a/socket/tst-cmsghdr.c b/socket/tst-cmsghdr.c
+new file mode 100644
+index 0000000000000000..68c96d3c9dd2bce8
+--- /dev/null
++++ b/socket/tst-cmsghdr.c
+@@ -0,0 +1,56 @@
++/* Test ancillary data header creation.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <sys/socket.h>
++#include <gnu/lib-names.h>
++#include <support/xdlfcn.h>
++#include <support/check.h>
++
++#define PAYLOAD "Hello, World!"
++
++/* CMSG_NXTHDR is a macro that calls an inline function defined in
++   bits/socket.h.  In case the function cannot be inlined, libc.so carries
++   a copy.  Both versions need to be tested.  */
++
++#define CMSG_NXTHDR_IMPL CMSG_NXTHDR
++#include "tst-cmsghdr-skeleton.c"
++#undef CMSG_NXTHDR_IMPL
++
++static struct cmsghdr * (* cmsg_nxthdr) (struct msghdr *, struct cmsghdr *);
++
++#define CMSG_NXTHDR_IMPL cmsg_nxthdr
++#include "tst-cmsghdr-skeleton.c"
++#undef CMSG_NXTHDR_IMPL
++
++static int
++do_test (void)
++{
++  static void *handle;
++
++  run_test_CMSG_NXTHDR ();
++
++  handle = xdlopen (LIBC_SO, RTLD_LAZY);
++  cmsg_nxthdr = (struct cmsghdr * (*) (struct msghdr *, struct cmsghdr *))
++                  xdlsym (handle, "__cmsg_nxthdr");
++
++  run_test_cmsg_nxthdr ();
++
++  return 0;
++}
++
++#include <support/test-driver.c>
+diff --git a/sysdeps/mach/hurd/bits/socket.h b/sysdeps/mach/hurd/bits/socket.h
+index 18959139dc7d325b..cc66684061e3e179 100644
+--- a/sysdeps/mach/hurd/bits/socket.h
++++ b/sysdeps/mach/hurd/bits/socket.h
+@@ -249,6 +249,12 @@ struct cmsghdr
+ 			 + CMSG_ALIGN (sizeof (struct cmsghdr)))
+ #define CMSG_LEN(len)   (CMSG_ALIGN (sizeof (struct cmsghdr)) + (len))
+ 
++/* Given a length, return the additional padding necessary such that
++   len + __CMSG_PADDING(len) == CMSG_ALIGN (len).  */
++#define __CMSG_PADDING(len) ((sizeof (size_t) \
++                              - ((len) & (sizeof (size_t) - 1))) \
++                             & (sizeof (size_t) - 1))
++
+ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ 				      struct cmsghdr *__cmsg) __THROW;
+ #ifdef __USE_EXTERN_INLINES
+@@ -258,18 +264,38 @@ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ _EXTERN_INLINE struct cmsghdr *
+ __NTH (__cmsg_nxthdr (struct msghdr *__mhdr, struct cmsghdr *__cmsg))
+ {
++  /* We may safely assume that __cmsg lies between __mhdr->msg_control and
++     __mhdr->msg_controllen because the user is required to obtain the first
++     cmsg via CMSG_FIRSTHDR, set its length, then obtain subsequent cmsgs
++     via CMSG_NXTHDR, setting lengths along the way.  However, we don't yet
++     trust the value of __cmsg->cmsg_len and therefore do not use it in any
++     pointer arithmetic until we check its value.  */
++
++  unsigned char * __msg_control_ptr = (unsigned char *) __mhdr->msg_control;
++  unsigned char * __cmsg_ptr = (unsigned char *) __cmsg;
++
++  size_t __size_needed = sizeof (struct cmsghdr)
++                         + __CMSG_PADDING (__cmsg->cmsg_len);
++
++  /* The current header is malformed, too small to be a full header.  */
+   if ((size_t) __cmsg->cmsg_len < sizeof (struct cmsghdr))
+-    /* The kernel header does this so there may be a reason.  */
+     return (struct cmsghdr *) 0;
+ 
++  /* There isn't enough space between __cmsg and the end of the buffer to
++  hold the current cmsg *and* the next one.  */
++  if (((size_t)
++         (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr)
++       < __size_needed)
++      || ((size_t)
++            (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr
++             - __size_needed)
++          < __cmsg->cmsg_len))
++
++    return (struct cmsghdr *) 0;
++
++  /* Now, we trust cmsg_len and can use it to find the next header.  */
+   __cmsg = (struct cmsghdr *) ((unsigned char *) __cmsg
+ 			       + CMSG_ALIGN (__cmsg->cmsg_len));
+-  if ((unsigned char *) (__cmsg + 1) > ((unsigned char *) __mhdr->msg_control
+-					+ __mhdr->msg_controllen)
+-      || ((unsigned char *) __cmsg + CMSG_ALIGN (__cmsg->cmsg_len)
+-	  > ((unsigned char *) __mhdr->msg_control + __mhdr->msg_controllen)))
+-    /* No more entries.  */
+-    return (struct cmsghdr *) 0;
+   return __cmsg;
+ }
+ #endif	/* Use `extern inline'.  */
+diff --git a/sysdeps/unix/sysv/linux/bits/socket.h b/sysdeps/unix/sysv/linux/bits/socket.h
+index c3fbb2110296273c..6b895b89831d2cb5 100644
+--- a/sysdeps/unix/sysv/linux/bits/socket.h
++++ b/sysdeps/unix/sysv/linux/bits/socket.h
+@@ -302,6 +302,12 @@ struct cmsghdr
+ 			 + CMSG_ALIGN (sizeof (struct cmsghdr)))
+ #define CMSG_LEN(len)   (CMSG_ALIGN (sizeof (struct cmsghdr)) + (len))
+ 
++/* Given a length, return the additional padding necessary such that
++   len + __CMSG_PADDING(len) == CMSG_ALIGN (len).  */
++#define __CMSG_PADDING(len) ((sizeof (size_t) \
++                              - ((len) & (sizeof (size_t) - 1))) \
++                             & (sizeof (size_t) - 1))
++
+ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ 				      struct cmsghdr *__cmsg) __THROW;
+ #ifdef __USE_EXTERN_INLINES
+@@ -311,18 +317,38 @@ extern struct cmsghdr *__cmsg_nxthdr (struct msghdr *__mhdr,
+ _EXTERN_INLINE struct cmsghdr *
+ __NTH (__cmsg_nxthdr (struct msghdr *__mhdr, struct cmsghdr *__cmsg))
+ {
++  /* We may safely assume that __cmsg lies between __mhdr->msg_control and
++     __mhdr->msg_controllen because the user is required to obtain the first
++     cmsg via CMSG_FIRSTHDR, set its length, then obtain subsequent cmsgs
++     via CMSG_NXTHDR, setting lengths along the way.  However, we don't yet
++     trust the value of __cmsg->cmsg_len and therefore do not use it in any
++     pointer arithmetic until we check its value.  */
++
++  unsigned char * __msg_control_ptr = (unsigned char *) __mhdr->msg_control;
++  unsigned char * __cmsg_ptr = (unsigned char *) __cmsg;
++
++  size_t __size_needed = sizeof (struct cmsghdr)
++                         + __CMSG_PADDING (__cmsg->cmsg_len);
++
++  /* The current header is malformed, too small to be a full header.  */
+   if ((size_t) __cmsg->cmsg_len < sizeof (struct cmsghdr))
+-    /* The kernel header does this so there may be a reason.  */
+     return (struct cmsghdr *) 0;
+ 
++  /* There isn't enough space between __cmsg and the end of the buffer to
++  hold the current cmsg *and* the next one.  */
++  if (((size_t)
++         (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr)
++       < __size_needed)
++      || ((size_t)
++            (__msg_control_ptr + __mhdr->msg_controllen - __cmsg_ptr
++             - __size_needed)
++          < __cmsg->cmsg_len))
++
++    return (struct cmsghdr *) 0;
++
++  /* Now, we trust cmsg_len and can use it to find the next header.  */
+   __cmsg = (struct cmsghdr *) ((unsigned char *) __cmsg
+ 			       + CMSG_ALIGN (__cmsg->cmsg_len));
+-  if ((unsigned char *) (__cmsg + 1) > ((unsigned char *) __mhdr->msg_control
+-					+ __mhdr->msg_controllen)
+-      || ((unsigned char *) __cmsg + CMSG_ALIGN (__cmsg->cmsg_len)
+-	  > ((unsigned char *) __mhdr->msg_control + __mhdr->msg_controllen)))
+-    /* No more entries.  */
+-    return (struct cmsghdr *) 0;
+   return __cmsg;
+ }
+ #endif	/* Use `extern inline'.  */
+diff --git a/sysdeps/unix/sysv/linux/cmsg_nxthdr.c b/sysdeps/unix/sysv/linux/cmsg_nxthdr.c
+index bab0be6884d9da1c..16594622211c1c8b 100644
+--- a/sysdeps/unix/sysv/linux/cmsg_nxthdr.c
++++ b/sysdeps/unix/sysv/linux/cmsg_nxthdr.c
+@@ -23,18 +23,38 @@
+ struct cmsghdr *
+ __cmsg_nxthdr (struct msghdr *mhdr, struct cmsghdr *cmsg)
+ {
++  /* We may safely assume that cmsg lies between mhdr->msg_control and
++     mhdr->msg_controllen because the user is required to obtain the first
++     cmsg via CMSG_FIRSTHDR, set its length, then obtain subsequent cmsgs
++     via CMSG_NXTHDR, setting lengths along the way.  However, we don't yet
++     trust the value of cmsg->cmsg_len and therefore do not use it in any
++     pointer arithmetic until we check its value.  */
++
++  unsigned char * msg_control_ptr = (unsigned char *) mhdr->msg_control;
++  unsigned char * cmsg_ptr = (unsigned char *) cmsg;
++
++  size_t size_needed = sizeof (struct cmsghdr)
++                       + __CMSG_PADDING (cmsg->cmsg_len);
++
++  /* The current header is malformed, too small to be a full header.  */
+   if ((size_t) cmsg->cmsg_len < sizeof (struct cmsghdr))
+-    /* The kernel header does this so there may be a reason.  */
+-    return NULL;
++    return (struct cmsghdr *) 0;
++
++  /* There isn't enough space between cmsg and the end of the buffer to
++  hold the current cmsg *and* the next one.  */
++  if (((size_t)
++         (msg_control_ptr + mhdr->msg_controllen - cmsg_ptr)
++       < size_needed)
++      || ((size_t)
++            (msg_control_ptr + mhdr->msg_controllen - cmsg_ptr
++             - size_needed)
++          < cmsg->cmsg_len))
++
++    return (struct cmsghdr *) 0;
+ 
++  /* Now, we trust cmsg_len and can use it to find the next header.  */
+   cmsg = (struct cmsghdr *) ((unsigned char *) cmsg
+ 			     + CMSG_ALIGN (cmsg->cmsg_len));
+-  if ((unsigned char *) (cmsg + 1) > ((unsigned char *) mhdr->msg_control
+-				      + mhdr->msg_controllen)
+-      || ((unsigned char *) cmsg + CMSG_ALIGN (cmsg->cmsg_len)
+-	  > ((unsigned char *) mhdr->msg_control + mhdr->msg_controllen)))
+-    /* No more entries.  */
+-    return NULL;
+   return cmsg;
+ }
+ libc_hidden_def (__cmsg_nxthdr)
diff --git a/SOURCES/glibc-rh2118667.patch b/SOURCES/glibc-rh2118667.patch
new file mode 100644
index 0000000..64f2bcc
--- /dev/null
+++ b/SOURCES/glibc-rh2118667.patch
@@ -0,0 +1,96 @@
+commit dd2315a866a4ac2b838ea1cb10c5ea1c35d51a2f
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Aug 16 08:27:50 2022 +0200
+
+    elf: Run tst-audit-tlsdesc, tst-audit-tlsdesc-dlopen everywhere
+    
+    The test is valid for all TLS models, but we want to make a reasonable
+    effort to test the GNU2 model specifically.  For example, aarch64
+    defaults to GNU2, but does not have -mtls-dialect=gnu2, and the test
+    was not run there.
+    
+    Suggested-by: Martin Coufal <mcoufal@redhat.com>
+
+Conflicts:
+	elf/Makefile
+	  (missing tst-align3 backport, missing libdl integration)
+
+diff --git a/elf/Makefile b/elf/Makefile
+index 9e721d5d4e0a1cd9..1dd36ba0486e56a0 100644
+--- a/elf/Makefile
++++ b/elf/Makefile
+@@ -331,6 +331,8 @@ tests += \
+   tst-addr1 \
+   tst-align \
+   tst-align2 \
++  tst-audit-tlsdesc \
++  tst-audit-tlsdesc-dlopen \
+   tst-audit1 \
+   tst-audit11 \
+   tst-audit12 \
+@@ -607,6 +609,8 @@ modules-names = \
+   tst-alignmod2 \
+   tst-array2dep \
+   tst-array5dep \
++  tst-audit-tlsdesc-mod1 \
++  tst-audit-tlsdesc-mod2 \
+   tst-audit11mod1 \
+   tst-audit11mod2 \
+   tst-audit12mod1 \
+@@ -640,6 +644,7 @@ modules-names = \
+   tst-auditmanymod7 \
+   tst-auditmanymod8 \
+   tst-auditmanymod9 \
++  tst-auditmod-tlsdesc  \
+   tst-auditmod1 \
+   tst-auditmod9a \
+   tst-auditmod9b \
+@@ -809,23 +814,8 @@ modules-names += tst-gnu2-tls1mod
+ $(objpfx)tst-gnu2-tls1: $(objpfx)tst-gnu2-tls1mod.so
+ tst-gnu2-tls1mod.so-no-z-defs = yes
+ CFLAGS-tst-gnu2-tls1mod.c += -mtls-dialect=gnu2
++endif # $(have-mtls-dialect-gnu2)
+ 
+-tests += tst-audit-tlsdesc tst-audit-tlsdesc-dlopen
+-modules-names += tst-audit-tlsdesc-mod1 tst-audit-tlsdesc-mod2 tst-auditmod-tlsdesc
+-$(objpfx)tst-audit-tlsdesc: $(objpfx)tst-audit-tlsdesc-mod1.so \
+-			    $(objpfx)tst-audit-tlsdesc-mod2.so \
+-			    $(shared-thread-library)
+-CFLAGS-tst-audit-tlsdesc-mod1.c += -mtls-dialect=gnu2
+-CFLAGS-tst-audit-tlsdesc-mod2.c += -mtls-dialect=gnu2
+-$(objpfx)tst-audit-tlsdesc-dlopen: $(shared-thread-library) $(libdl)
+-$(objpfx)tst-audit-tlsdesc-dlopen.out: $(objpfx)tst-audit-tlsdesc-mod1.so \
+-				       $(objpfx)tst-audit-tlsdesc-mod2.so
+-$(objpfx)tst-audit-tlsdesc-mod1.so: $(objpfx)tst-audit-tlsdesc-mod2.so
+-$(objpfx)tst-audit-tlsdesc.out: $(objpfx)tst-auditmod-tlsdesc.so
+-tst-audit-tlsdesc-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
+-$(objpfx)tst-audit-tlsdesc-dlopen.out: $(objpfx)tst-auditmod-tlsdesc.so
+-tst-audit-tlsdesc-dlopen-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
+-endif
+ ifeq (yes,$(have-protected-data))
+ modules-names += tst-protected1moda tst-protected1modb
+ tests += tst-protected1a tst-protected1b
+@@ -2559,5 +2549,23 @@ $(objpfx)tst-tls21.out: $(objpfx)tst-tls21mod.so
+ $(objpfx)tst-tls21mod.so: $(tst-tls-many-dynamic-modules:%=$(objpfx)%.so)
+ 
+ $(objpfx)tst-rtld-run-static.out: $(objpfx)/ldconfig
++
+ $(objpfx)tst-dlmopen-gethostbyname: $(libdl)
+ $(objpfx)tst-dlmopen-gethostbyname.out: $(objpfx)tst-dlmopen-gethostbyname-mod.so
++$(objpfx)tst-audit-tlsdesc: $(objpfx)tst-audit-tlsdesc-mod1.so \
++			    $(objpfx)tst-audit-tlsdesc-mod2.so \
++			    $(shared-thread-library)
++ifeq (yes,$(have-mtls-dialect-gnu2))
++# The test is valid for all TLS types, but we want to exercise GNU2
++# TLS if possible.
++CFLAGS-tst-audit-tlsdesc-mod1.c += -mtls-dialect=gnu2
++CFLAGS-tst-audit-tlsdesc-mod2.c += -mtls-dialect=gnu2
++endif
++$(objpfx)tst-audit-tlsdesc-dlopen: $(shared-thread-library) $(libdl)
++$(objpfx)tst-audit-tlsdesc-dlopen.out: $(objpfx)tst-audit-tlsdesc-mod1.so \
++				       $(objpfx)tst-audit-tlsdesc-mod2.so
++$(objpfx)tst-audit-tlsdesc-mod1.so: $(objpfx)tst-audit-tlsdesc-mod2.so
++$(objpfx)tst-audit-tlsdesc.out: $(objpfx)tst-auditmod-tlsdesc.so
++tst-audit-tlsdesc-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
++$(objpfx)tst-audit-tlsdesc-dlopen.out: $(objpfx)tst-auditmod-tlsdesc.so
++tst-audit-tlsdesc-dlopen-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
diff --git a/SOURCES/glibc-rh2121746-1.patch b/SOURCES/glibc-rh2121746-1.patch
new file mode 100644
index 0000000..a27c0eb
--- /dev/null
+++ b/SOURCES/glibc-rh2121746-1.patch
@@ -0,0 +1,202 @@
+From d0e357ff45a75553dee3b17ed7d303bfa544f6fe Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Fri, 26 Aug 2022 21:15:43 +0200
+Subject: elf: Call __libc_early_init for reused namespaces (bug 29528)
+
+libc_map is never reset to NULL, neither during dlclose nor on a
+dlopen call which reuses the namespace structure.  As a result, if a
+namespace is reused, its libc is not initialized properly.  The most
+visible result is a crash in the <ctype.h> functions.
+
+To prevent similar bugs on namespace reuse from surfacing,
+unconditionally initialize the chosen namespace to zero using memset.
+
+[Note from DJ: Regenerated for new line numbers and context, added
+link dependency on libdl]
+
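+In plain dlfcn terms (without the test harness wrappers used by the new
+test below), the failing sequence looks roughly like this sketch; module
+names are placeholders, and the program needs _GNU_SOURCE and -ldl:
+
+  #define _GNU_SOURCE
+  #include <dlfcn.h>
+  #include <stdio.h>
+
+  int
+  main (void)
+  {
+    /* First use of a fresh namespace: __libc_early_init runs normally.  */
+    void *h = dlmopen (LM_ID_NEWLM, "mod1.so", RTLD_NOW);
+    if (h == NULL)
+      {
+        fprintf (stderr, "dlmopen: %s\n", dlerror ());
+        return 1;
+      }
+    dlclose (h);
+
+    /* The namespace slot is reused here.  Before the fix, libc in the
+       reused namespace was not initialized again, so code in mod2.so
+       calling e.g. isalpha could crash.  */
+    h = dlmopen (LM_ID_NEWLM, "mod2.so", RTLD_NOW);
+    if (h != NULL)
+      dlclose (h);
+    return 0;
+  }
+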
+diff -rupN a/elf/Makefile b/elf/Makefile
+--- a/elf/Makefile	2022-10-05 15:04:11.814901849 -0400
++++ b/elf/Makefile	2022-10-05 17:02:19.858635958 -0400
+@@ -367,6 +367,7 @@ tests += \
+   tst-dlmopen3 \
+   tst-dlmopen-dlerror \
+   tst-dlmopen-gethostbyname \
++  tst-dlmopen-twice \
+   tst-dlopenfail \
+   tst-dlopenfail-2 \
+   tst-dlopenrpath \
+@@ -671,6 +672,8 @@ modules-names = \
+   tst-dlmopen1mod \
+   tst-dlmopen-dlerror-mod \
+   tst-dlmopen-gethostbyname-mod \
++  tst-dlmopen-twice-mod1 \
++  tst-dlmopen-twice-mod2 \
+   tst-dlopenfaillinkmod \
+   tst-dlopenfailmod1 \
+   tst-dlopenfailmod2 \
+@@ -2569,3 +2572,9 @@ $(objpfx)tst-audit-tlsdesc.out: $(objpfx
+ tst-audit-tlsdesc-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
+ $(objpfx)tst-audit-tlsdesc-dlopen.out: $(objpfx)tst-auditmod-tlsdesc.so
+ tst-audit-tlsdesc-dlopen-ENV = LD_AUDIT=$(objpfx)tst-auditmod-tlsdesc.so
++
++
++$(objpfx)tst-dlmopen-twice: $(libdl)
++$(objpfx)tst-dlmopen-twice.out: \
++  $(objpfx)tst-dlmopen-twice-mod1.so \
++  $(objpfx)tst-dlmopen-twice-mod2.so
+diff -rupN a/elf/dl-open.c b/elf/dl-open.c
+--- a/elf/dl-open.c	2022-10-05 15:04:11.635894932 -0400
++++ b/elf/dl-open.c	2022-10-05 15:10:31.667638060 -0400
+@@ -836,11 +836,14 @@ _dl_open (const char *file, int mode, co
+ 	  _dl_signal_error (EINVAL, file, NULL, N_("\
+ no more namespaces available for dlmopen()"));
+ 	}
+-      else if (nsid == GL(dl_nns))
+-	{
+-	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
+-	  ++GL(dl_nns);
+-	}
++
++      if (nsid == GL(dl_nns))
++	++GL(dl_nns);
++
++      /* Initialize the new namespace.  Most members are
++	 zero-initialized, only the lock needs special treatment.  */
++      memset (&GL(dl_ns)[nsid], 0, sizeof (GL(dl_ns)[nsid]));
++      __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
+ 
+       _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
+     }
+diff -rupN a/elf/tst-dlmopen-twice-mod1.c b/elf/tst-dlmopen-twice-mod1.c
+--- a/elf/tst-dlmopen-twice-mod1.c	1969-12-31 19:00:00.000000000 -0500
++++ b/elf/tst-dlmopen-twice-mod1.c	2022-10-05 15:10:31.671638216 -0400
+@@ -0,0 +1,37 @@
++/* Initialization of libc after dlmopen/dlclose/dlmopen (bug 29528).  Module 1.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <stdio.h>
++
++static void __attribute__ ((constructor))
++init (void)
++{
++  puts ("info: tst-dlmopen-twice-mod1.so loaded");
++  fflush (stdout);
++}
++
++static void __attribute__ ((destructor))
++fini (void)
++{
++  puts ("info: tst-dlmopen-twice-mod1.so about to be unloaded");
++  fflush (stdout);
++}
++
++/* Large allocation.  The second module does not have this, so it
++   should load libc at a different address.  */
++char large_allocate[16 * 1024 * 1024];
+diff -rupN a/elf/tst-dlmopen-twice-mod2.c b/elf/tst-dlmopen-twice-mod2.c
+--- a/elf/tst-dlmopen-twice-mod2.c	1969-12-31 19:00:00.000000000 -0500
++++ b/elf/tst-dlmopen-twice-mod2.c	2022-10-05 15:10:31.676638411 -0400
+@@ -0,0 +1,50 @@
++/* Initialization of libc after dlmopen/dlclose/dlmopen (bug 29528).  Module 2.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <ctype.h>
++#include <stdio.h>
++
++static void __attribute__ ((constructor))
++init (void)
++{
++  puts ("info: tst-dlmopen-twice-mod2.so loaded");
++  fflush (stdout);
++}
++
++static void __attribute__ ((destructor))
++fini (void)
++{
++  puts ("info: tst-dlmopen-twice-mod2.so about to be unloaded");
++  fflush (stdout);
++}
++
++int
++run_check (void)
++{
++  puts ("info: about to call isalpha");
++  fflush (stdout);
++
++  volatile char ch = 'a';
++  if (!isalpha (ch))
++    {
++      puts ("error: isalpha ('a') is not true");
++      fflush (stdout);
++      return 1;
++    }
++  return 0;
++}
+diff -rupN a/elf/tst-dlmopen-twice.c b/elf/tst-dlmopen-twice.c
+--- a/elf/tst-dlmopen-twice.c	1969-12-31 19:00:00.000000000 -0500
++++ b/elf/tst-dlmopen-twice.c	2022-10-05 15:10:31.679638528 -0400
+@@ -0,0 +1,34 @@
++/* Initialization of libc after dlmopen/dlclose/dlmopen (bug 29528).  Main.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <support/xdlfcn.h>
++#include <support/check.h>
++
++static int
++do_test (void)
++{
++  void *handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-twice-mod1.so", RTLD_NOW);
++  xdlclose (handle);
++  handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-twice-mod2.so", RTLD_NOW);
++  int (*run_check) (void) = xdlsym (handle, "run_check");
++  TEST_COMPARE (run_check (), 0);
++  xdlclose (handle);
++  return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/SOURCES/glibc-rh2121746-2.patch b/SOURCES/glibc-rh2121746-2.patch
new file mode 100644
index 0000000..5bd43c8
--- /dev/null
+++ b/SOURCES/glibc-rh2121746-2.patch
@@ -0,0 +1,98 @@
+From 2c42257314536b94cc8d52edede86e94e98c1436 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Fri, 14 Oct 2022 11:02:25 +0200
+Subject: [PATCH] elf: Do not completely clear reused namespace in dlmopen (bug
+ 29600)
+Content-type: text/plain; charset=UTF-8
+
+The data in the _ns_debug member must be preserved, otherwise
+_dl_debug_initialize enters an infinite loop.  To be conservative,
+only clear the libc_map member for now, to fix bug 29528.
+
+Fixes commit d0e357ff45a75553dee3b17ed7d303bfa544f6fe
+("elf: Call __libc_early_init for reused namespaces (bug 29528)"),
+by reverting most of it.
+
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+Tested-by: Carlos O'Donell <carlos@redhat.com>
+---
+ elf/dl-open.c           | 14 ++++++--------
+ elf/tst-dlmopen-twice.c | 28 ++++++++++++++++++++++++----
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/elf/dl-open.c b/elf/dl-open.c
+index 46e8066fd8..e7db5e9642 100644
+--- a/elf/dl-open.c
++++ b/elf/dl-open.c
+@@ -836,15 +836,13 @@ _dl_open (const char *file, int mode, co
+ 	  _dl_signal_error (EINVAL, file, NULL, N_("\
+ no more namespaces available for dlmopen()"));
+ 	}
++      else if (nsid == GL(dl_nns))
++	{
++	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
++	  ++GL(dl_nns);
++	}
+ 
+-      if (nsid == GL(dl_nns))
+-	++GL(dl_nns);
+-
+-      /* Initialize the new namespace.  Most members are
+-	 zero-initialized, only the lock needs special treatment.  */
+-      memset (&GL(dl_ns)[nsid], 0, sizeof (GL(dl_ns)[nsid]));
+-      __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
+-
++      GL(dl_ns)[nsid].libc_map = NULL;
+       _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
+     }
+   /* Never allow loading a DSO in a namespace which is empty.  Such
+diff --git a/elf/tst-dlmopen-twice.c b/elf/tst-dlmopen-twice.c
+index 449f3c8fa9..70c71fe19c 100644
+--- a/elf/tst-dlmopen-twice.c
++++ b/elf/tst-dlmopen-twice.c
+@@ -16,18 +16,38 @@
+    License along with the GNU C Library; if not, see
+    <https://www.gnu.org/licenses/>.  */
+ 
+-#include <support/xdlfcn.h>
++#include <stdio.h>
+ #include <support/check.h>
++#include <support/xdlfcn.h>
+ 
+-static int
+-do_test (void)
++/* Run the test multiple times, to check finding a new namespace while
++   another namespace is already in use.  This used to trigger bug 29600.  */
++static void
++recurse (int depth)
+ {
+-  void *handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-twice-mod1.so", RTLD_NOW);
++  if (depth == 0)
++    return;
++
++  printf ("info: running at depth %d\n", depth);
++  void *handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-twice-mod1.so",
++                           RTLD_NOW);
+   xdlclose (handle);
+   handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-twice-mod2.so", RTLD_NOW);
+   int (*run_check) (void) = xdlsym (handle, "run_check");
+   TEST_COMPARE (run_check (), 0);
++  recurse (depth - 1);
+   xdlclose (handle);
++}
++
++static int
++do_test (void)
++{
++  /* First run the test without nesting.  */
++  recurse (1);
++
++  /* Then with nesting.  The constant needs to be less than the
++     internal DL_NNS namespace constant.  */
++  recurse (10);
+   return 0;
+ }
+ 
+-- 
+2.31.1
+
diff --git a/SOURCES/glibc-rh2122498.patch b/SOURCES/glibc-rh2122498.patch
new file mode 100644
index 0000000..1699abf
--- /dev/null
+++ b/SOURCES/glibc-rh2122498.patch
@@ -0,0 +1,39 @@
+commit 02ca25fef2785974011e9c5beecc99b900b69fd7
+Author: Fabian Vogt <fvogt@suse.de>
+Date:   Wed Jul 27 11:44:07 2022 +0200
+
+    nscd: Fix netlink cache invalidation if epoll is used [BZ #29415]
+    
+    Processes cache network interface information such as whether IPv4 or IPv6
+    are enabled. This is only checked again if the "netlink timestamp" provided
+    by nscd changed, which is triggered by netlink socket activity.
+    
+    However, the epoll handler for the netlink socket failed to assign the
+    new timestamp to the nscd database. The handler for plain poll did that
+    properly; copy that over.
+    
+    This bug caused, e.g., processes which started before network
+    configuration to get unusable addresses from getaddrinfo, such as
+    IPv6-only results even though only IPv4 is available:
+    https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/issues/1041
+    
+    It's a bit hard to reproduce, so I verified this by checking the timestamp
+    on calls to __check_pf manually. Without this patch it's stuck at 1, now
+    it's increasing on network changes as expected.
+    
+    Signed-off-by: Fabian Vogt <fvogt@suse.de>
+
+diff --git a/nscd/connections.c b/nscd/connections.c
+index 98182007646a33d5..19039bdbb210466a 100644
+--- a/nscd/connections.c
++++ b/nscd/connections.c
+@@ -2286,7 +2286,8 @@ main_loop_epoll (int efd)
+ 					     sizeof (buf))) != -1)
+ 	      ;
+ 
+-	    __bump_nl_timestamp ();
++	    dbs[hstdb].head->extra_data[NSCD_HST_IDX_CONF_TIMESTAMP]
++	      = __bump_nl_timestamp ();
+ 	  }
+ # endif
+ 	else
diff --git a/SOURCES/glibc-rh2122501-1.patch b/SOURCES/glibc-rh2122501-1.patch
new file mode 100644
index 0000000..75d333e
--- /dev/null
+++ b/SOURCES/glibc-rh2122501-1.patch
@@ -0,0 +1,472 @@
+commit c6fad4fa149485a307207f707e5851216f190fc8
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Thu Mar 19 18:32:28 2020 -0300
+
+    stdio: Remove memory leak from multibyte conversion [BZ#25691]
+    
+    This is an updated version of a previous patch [1] with the
+    following changes:
+    
+      - Use compiler overflow builtins on done_add_func function.
+      - Define the scratch outstring_converted_wide_string using
+        CHAR_T.
+      - Added a testcase and mention the bug report.
+    
+    Both the default and wide printf functions might leak memory when
+    handling multibyte character conversion, depending on the size
+    of the input (whether or not __libc_use_alloca triggers the fallback
+    heap allocation).
+    
+    This patch fixes it by removing the extra memory allocation on
+    string formatting with conversion parts.
+    
+    The testcase uses an input argument size that triggers memory leaks
+    in unpatched code (when using a scratch buffer, the threshold for
+    switching to heap allocation is lower).
+    
+    Checked on x86_64-linux-gnu and i686-linux-gnu.
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+    
+    [1] https://sourceware.org/pipermail/libc-alpha/2017-June/082098.html
+    
+    (cherry picked from commit 3cc4a8367c23582b7db14cf4e150e4068b7fd461)
+
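+The done_add_func rewrite below uses INT_ADD_WRAPV from <intprops.h>.  A
+standalone sketch of the same overflow check, written directly against the
+GCC/Clang builtin (illustration only, not part of this patch):
+
+  #include <errno.h>
+  #include <stddef.h>
+
+  /* Add LENGTH to DONE; return -1 and set errno on int overflow,
+     mirroring what done_add_func does via INT_ADD_WRAPV.  */
+  static int
+  add_written (size_t length, int done)
+  {
+    if (done < 0)
+      return done;
+    int ret;
+    if (__builtin_add_overflow (done, length, &ret))
+      {
+        errno = EOVERFLOW;
+        return -1;
+      }
+    return ret;
+  }
+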
+diff --git a/stdio-common/vfprintf.c b/stdio-common/vfprintf.c
+index ae412e4b8444aea2..dab56b6ba2c7bdbe 100644
+--- a/stdio-common/vfprintf.c
++++ b/stdio-common/vfprintf.c
+@@ -31,6 +31,7 @@
+ #include <locale/localeinfo.h>
+ #include <stdio.h>
+ #include <scratch_buffer.h>
++#include <intprops.h>
+ 
+ /* This code is shared between the standard stdio implementation found
+    in GNU C library and the libio implementation originally found in
+@@ -64,23 +65,40 @@
+     } while (0)
+ #define UNBUFFERED_P(S) ((S)->_flags & _IO_UNBUFFERED)
+ 
+-#define done_add(val) \
+-  do {									      \
+-    unsigned int _val = val;						      \
+-    assert ((unsigned int) done < (unsigned int) INT_MAX);		      \
+-    if (__glibc_unlikely (INT_MAX - done < _val))			      \
+-      {									      \
+-	done = -1;							      \
+-	 __set_errno (EOVERFLOW);					      \
+-	goto all_done;							      \
+-      }									      \
+-    done += _val;							      \
+-  } while (0)
++/* Add LENGTH to DONE.  Return the new value of DONE, or -1 on
++   overflow (and set errno accordingly).  */
++static inline int
++done_add_func (size_t length, int done)
++{
++  if (done < 0)
++    return done;
++  int ret;
++  if (INT_ADD_WRAPV (done, length, &ret))
++    {
++      __set_errno (EOVERFLOW);
++      return -1;
++    }
++  return ret;
++}
++
++#define done_add(val)							\
++  do									\
++    {									\
++      /* Ensure that VAL has a type similar to int.  */			\
++      _Static_assert (sizeof (val) == sizeof (int), "value int size");	\
++      _Static_assert ((__typeof__ (val)) -1 < 0, "value signed");	\
++      done = done_add_func ((val), done);				\
++      if (done < 0)							\
++	goto all_done;							\
++    }									\
++  while (0)
+ 
+ #ifndef COMPILE_WPRINTF
+ # define vfprintf	_IO_vfprintf_internal
+ # define CHAR_T		char
++# define CHAR_T		char
+ # define UCHAR_T	unsigned char
++# define OTHER_CHAR_T   wchar_t
+ # define INT_T		int
+ typedef const char *THOUSANDS_SEP_T;
+ # define L_(Str)	Str
+@@ -88,22 +106,10 @@ typedef const char *THOUSANDS_SEP_T;
+ # define STR_LEN(Str)	strlen (Str)
+ 
+ # define PUT(F, S, N)	_IO_sputn ((F), (S), (N))
+-# define PAD(Padchar) \
+-  do {									      \
+-    if (width > 0)							      \
+-      {									      \
+-	ssize_t written = _IO_padn (s, (Padchar), width);		      \
+-	if (__glibc_unlikely (written != width))			      \
+-	  {								      \
+-	    done = -1;							      \
+-	    goto all_done;						      \
+-	  }								      \
+-	done_add (written);						      \
+-      }									      \
+-  } while (0)
+ # define PUTC(C, F)	_IO_putc_unlocked (C, F)
+ # define ORIENT		if (_IO_vtable_offset (s) == 0 && _IO_fwide (s, -1) != -1)\
+ 			  return -1
++# define CONVERT_FROM_OTHER_STRING __wcsrtombs
+ #else
+ # define vfprintf	_IO_vfwprintf
+ # define CHAR_T		wchar_t
+@@ -118,21 +124,11 @@ typedef wchar_t THOUSANDS_SEP_T;
+ # include <_itowa.h>
+ 
+ # define PUT(F, S, N)	_IO_sputn ((F), (S), (N))
+-# define PAD(Padchar) \
+-  do {									      \
+-    if (width > 0)							      \
+-      {									      \
+-	ssize_t written = _IO_wpadn (s, (Padchar), width);		      \
+-	if (__glibc_unlikely (written != width))			      \
+-	  {								      \
+-	    done = -1;							      \
+-	    goto all_done;						      \
+-	  }								      \
+-	done_add (written);						      \
+-      }									      \
+-  } while (0)
+ # define PUTC(C, F)	_IO_putwc_unlocked (C, F)
+ # define ORIENT		if (_IO_fwide (s, 1) != 1) return -1
++# define CONVERT_FROM_OTHER_STRING __mbsrtowcs
++# define CHAR_T		wchar_t
++# define OTHER_CHAR_T   char
+ 
+ # undef _itoa
+ # define _itoa(Val, Buf, Base, Case) _itowa (Val, Buf, Base, Case)
+@@ -141,6 +137,33 @@ typedef wchar_t THOUSANDS_SEP_T;
+ # define EOF WEOF
+ #endif
+ 
++static inline int
++pad_func (FILE *s, CHAR_T padchar, int width, int done)
++{
++  if (width > 0)
++    {
++      ssize_t written;
++#ifndef COMPILE_WPRINTF
++      written = _IO_padn (s, padchar, width);
++#else
++      written = _IO_wpadn (s, padchar, width);
++#endif
++      if (__glibc_unlikely (written != width))
++	return -1;
++      return done_add_func (width, done);
++    }
++  return done;
++}
++
++#define PAD(Padchar)							\
++  do									\
++    {									\
++      done = pad_func (s, (Padchar), width, done);			\
++      if (done < 0)							\
++	goto all_done;							\
++    }									\
++  while (0)
++
+ #include "_i18n_number.h"
+ 
+ /* Include the shared code for parsing the format string.  */
+@@ -160,24 +183,115 @@ typedef wchar_t THOUSANDS_SEP_T;
+     }									      \
+   while (0)
+ 
+-#define outstring(String, Len)						      \
+-  do									      \
+-    {									      \
+-      assert ((size_t) done <= (size_t) INT_MAX);			      \
+-      if ((size_t) PUT (s, (String), (Len)) != (size_t) (Len))		      \
+-	{								      \
+-	  done = -1;							      \
+-	  goto all_done;						      \
+-	}								      \
+-      if (__glibc_unlikely (INT_MAX - done < (Len)))			      \
+-      {									      \
+-	done = -1;							      \
+-	 __set_errno (EOVERFLOW);					      \
+-	goto all_done;							      \
+-      }									      \
+-      done += (Len);							      \
+-    }									      \
+-  while (0)
++static inline int
++outstring_func (FILE *s, const UCHAR_T *string, size_t length, int done)
++{
++  assert ((size_t) done <= (size_t) INT_MAX);
++  if ((size_t) PUT (s, string, length) != (size_t) (length))
++    return -1;
++  return done_add_func (length, done);
++}
++
++#define outstring(String, Len)						\
++  do									\
++    {									\
++      const void *string_ = (String);					\
++      done = outstring_func (s, string_, (Len), done);			\
++      if (done < 0)							\
++	goto all_done;							\
++    }									\
++   while (0)
++
++/* Write the string SRC to S.  If PREC is non-negative, write at most
++   PREC bytes.  If LEFT is true, perform left justification.  */
++static int
++outstring_converted_wide_string (FILE *s, const OTHER_CHAR_T *src, int prec,
++				 int width, bool left, int done)
++{
++  /* Use a small buffer to combine processing of multiple characters.
++     CONVERT_FROM_OTHER_STRING expects the buffer size in (wide)
++     characters, and buf_length counts that.  */
++  enum { buf_length = 256 / sizeof (CHAR_T) };
++  CHAR_T buf[buf_length];
++  _Static_assert (sizeof (buf) > MB_LEN_MAX,
++		  "buffer is large enough for a single multi-byte character");
++
++  /* Add the initial padding if needed.  */
++  if (width > 0 && !left)
++    {
++      /* Make a first pass to find the output width, so that we can
++	 add the required padding.  */
++      mbstate_t mbstate = { 0 };
++      const OTHER_CHAR_T *src_copy = src;
++      size_t total_written;
++      if (prec < 0)
++	total_written = CONVERT_FROM_OTHER_STRING
++	  (NULL, &src_copy, 0, &mbstate);
++      else
++	{
++	  /* The source might not be null-terminated.  Enforce the
++	     limit manually, based on the output length.  */
++	  total_written = 0;
++	  size_t limit = prec;
++	  while (limit > 0 && src_copy != NULL)
++	    {
++	      size_t write_limit = buf_length;
++	      if (write_limit > limit)
++		write_limit = limit;
++	      size_t written = CONVERT_FROM_OTHER_STRING
++		(buf, &src_copy, write_limit, &mbstate);
++	      if (written == (size_t) -1)
++		return -1;
++	      if (written == 0)
++		break;
++	      total_written += written;
++	      limit -= written;
++	    }
++	}
++
++      /* Output initial padding.  */
++      if (total_written < width)
++	{
++	  done = pad_func (s, L_(' '), width - total_written, done);
++	  if (done < 0)
++	    return done;
++	}
++    }
++
++  /* Convert the input string, piece by piece.  */
++  size_t total_written = 0;
++  {
++    mbstate_t mbstate = { 0 };
++    /* If prec is negative, remaining is not decremented, otherwise,
++      it serves as the write limit.  */
++    size_t remaining = -1;
++    if (prec >= 0)
++      remaining = prec;
++    while (remaining > 0 && src != NULL)
++      {
++	size_t write_limit = buf_length;
++	if (remaining < write_limit)
++	  write_limit = remaining;
++	size_t written = CONVERT_FROM_OTHER_STRING
++	  (buf, &src, write_limit, &mbstate);
++	if (written == (size_t) -1)
++	  return -1;
++	if (written == 0)
++	  break;
++	done = outstring_func (s, (const UCHAR_T *) buf, written, done);
++	if (done < 0)
++	  return done;
++	total_written += written;
++	if (prec >= 0)
++	  remaining -= written;
++      }
++  }
++
++  /* Add final padding.  */
++  if (width > 0 && left && total_written < width)
++    return pad_func (s, L_(' '), width - total_written, done);
++  return done;
++}
+ 
+ /* For handling long_double and longlong we use the same flag.  If
+    `long' and `long long' are effectively the same type define it to
+@@ -975,7 +1089,6 @@ static const uint8_t jump_table[] =
+     LABEL (form_string):						      \
+       {									      \
+ 	size_t len;							      \
+-	int string_malloced;						      \
+ 									      \
+ 	/* The string argument could in fact be `char *' or `wchar_t *'.      \
+ 	   But this should not make a difference here.  */		      \
+@@ -987,7 +1100,6 @@ static const uint8_t jump_table[] =
+ 	/* Entry point for printing other strings.  */			      \
+       LABEL (print_string):						      \
+ 									      \
+-	string_malloced = 0;						      \
+ 	if (string == NULL)						      \
+ 	  {								      \
+ 	    /* Write "(null)" if there's space.  */			      \
+@@ -1004,41 +1116,12 @@ static const uint8_t jump_table[] =
+ 	  }								      \
+ 	else if (!is_long && spec != L_('S'))				      \
+ 	  {								      \
+-	    /* This is complicated.  We have to transform the multibyte	      \
+-	       string into a wide character string.  */			      \
+-	    const char *mbs = (const char *) string;			      \
+-	    mbstate_t mbstate;						      \
+-									      \
+-	    len = prec != -1 ? __strnlen (mbs, (size_t) prec) : strlen (mbs); \
+-									      \
+-	    /* Allocate dynamically an array which definitely is long	      \
+-	       enough for the wide character version.  Each byte in the	      \
+-	       multi-byte string can produce at most one wide character.  */  \
+-	    if (__glibc_unlikely (len > SIZE_MAX / sizeof (wchar_t)))	      \
+-	      {								      \
+-		__set_errno (EOVERFLOW);				      \
+-		done = -1;						      \
+-		goto all_done;						      \
+-	      }								      \
+-	    else if (__libc_use_alloca (len * sizeof (wchar_t)))	      \
+-	      string = (CHAR_T *) alloca (len * sizeof (wchar_t));	      \
+-	    else if ((string = (CHAR_T *) malloc (len * sizeof (wchar_t)))    \
+-		     == NULL)						      \
+-	      {								      \
+-		done = -1;						      \
+-		goto all_done;						      \
+-	      }								      \
+-	    else							      \
+-	      string_malloced = 1;					      \
+-									      \
+-	    memset (&mbstate, '\0', sizeof (mbstate_t));		      \
+-	    len = __mbsrtowcs (string, &mbs, len, &mbstate);		      \
+-	    if (len == (size_t) -1)					      \
+-	      {								      \
+-		/* Illegal multibyte character.  */			      \
+-		done = -1;						      \
+-		goto all_done;						      \
+-	      }								      \
++	    done = outstring_converted_wide_string			      \
++	      (s, (const char *) string, prec, width, left, done);	      \
++	    if (done < 0)						      \
++	      goto all_done;						      \
++	    /* The padding has already been written.  */		      \
++	    break;							      \
+ 	  }								      \
+ 	else								      \
+ 	  {								      \
+@@ -1061,8 +1144,6 @@ static const uint8_t jump_table[] =
+ 	outstring (string, len);					      \
+ 	if (left)							      \
+ 	  PAD (L' ');							      \
+-	if (__glibc_unlikely (string_malloced))				      \
+-	  free (string);						      \
+       }									      \
+       break;
+ #else
+@@ -1111,7 +1192,6 @@ static const uint8_t jump_table[] =
+     LABEL (form_string):						      \
+       {									      \
+ 	size_t len;							      \
+-	int string_malloced;						      \
+ 									      \
+ 	/* The string argument could in fact be `char *' or `wchar_t *'.      \
+ 	   But this should not make a difference here.  */		      \
+@@ -1123,7 +1203,6 @@ static const uint8_t jump_table[] =
+ 	/* Entry point for printing other strings.  */			      \
+       LABEL (print_string):						      \
+ 									      \
+-	string_malloced = 0;						      \
+ 	if (string == NULL)						      \
+ 	  {								      \
+ 	    /* Write "(null)" if there's space.  */			      \
+@@ -1149,51 +1228,12 @@ static const uint8_t jump_table[] =
+ 	  }								      \
+ 	else								      \
+ 	  {								      \
+-	    const wchar_t *s2 = (const wchar_t *) string;		      \
+-	    mbstate_t mbstate;						      \
+-									      \
+-	    memset (&mbstate, '\0', sizeof (mbstate_t));		      \
+-									      \
+-	    if (prec >= 0)						      \
+-	      {								      \
+-		/* The string `s2' might not be NUL terminated.  */	      \
+-		if (__libc_use_alloca (prec))				      \
+-		  string = (char *) alloca (prec);			      \
+-		else if ((string = (char *) malloc (prec)) == NULL)	      \
+-		  {							      \
+-		    done = -1;						      \
+-		    goto all_done;					      \
+-		  }							      \
+-		else							      \
+-		  string_malloced = 1;					      \
+-		len = __wcsrtombs (string, &s2, prec, &mbstate);	      \
+-	      }								      \
+-	    else							      \
+-	      {								      \
+-		len = __wcsrtombs (NULL, &s2, 0, &mbstate);		      \
+-		if (len != (size_t) -1)					      \
+-		  {							      \
+-		    assert (__mbsinit (&mbstate));			      \
+-		    s2 = (const wchar_t *) string;			      \
+-		    if (__libc_use_alloca (len + 1))			      \
+-		      string = (char *) alloca (len + 1);		      \
+-		    else if ((string = (char *) malloc (len + 1)) == NULL)    \
+-		      {							      \
+-			done = -1;					      \
+-			goto all_done;					      \
+-		      }							      \
+-		    else						      \
+-		      string_malloced = 1;				      \
+-		    (void) __wcsrtombs (string, &s2, len + 1, &mbstate);      \
+-		  }							      \
+-	      }								      \
+-									      \
+-	    if (len == (size_t) -1)					      \
+-	      {								      \
+-		/* Illegal wide-character string.  */			      \
+-		done = -1;						      \
+-		goto all_done;						      \
+-	      }								      \
++	    done = outstring_converted_wide_string			      \
++	      (s, (const wchar_t *) string, prec, width, left, done);	      \
++	    if (done < 0)						      \
++	      goto all_done;						      \
++	    /* The padding has already been written.  */		      \
++	    break;							      \
+ 	  }								      \
+ 									      \
+ 	if ((width -= len) < 0)						      \
+@@ -1207,8 +1247,6 @@ static const uint8_t jump_table[] =
+ 	outstring (string, len);					      \
+ 	if (left)							      \
+ 	  PAD (' ');							      \
+-	if (__glibc_unlikely (string_malloced))			              \
+-	  free (string);						      \
+       }									      \
+       break;
+ #endif
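
The core of the fix above is outstring_converted_wide_string, which converts between wide and multibyte strings through a small fixed scratch buffer instead of allocating a buffer sized by the precision. The following standalone sketch is not part of the patch; the helper name write_wide_chunked and the 256-byte buffer size are illustrative choices. It shows the same chunked-conversion idea using plain wcsrtombs and fwrite:

#include <stdio.h>
#include <string.h>
#include <wchar.h>

/* Write at most PREC converted bytes of the wide string SRC to FP.
   Returns the number of bytes written, or -1 on an invalid wide
   character or write error.  */
static long
write_wide_chunked (FILE *fp, const wchar_t *src, size_t prec)
{
  char buf[256];                /* fixed scratch buffer, as in the patch */
  mbstate_t st;
  memset (&st, 0, sizeof st);
  long total = 0;

  while (prec > 0 && src != NULL)
    {
      size_t limit = prec < sizeof buf ? prec : sizeof buf;
      size_t n = wcsrtombs (buf, &src, limit, &st);
      if (n == (size_t) -1)
        return -1;              /* invalid wide character */
      if (n == 0)
        break;                  /* nothing more fits or string exhausted */
      if (fwrite (buf, 1, n, fp) != n)
        return -1;
      total += n;
      prec -= n;
    }
  return total;
}

int
main (void)
{
  long n = write_wide_chunked (stdout, L"chunked conversion needs no malloc", 1000);
  printf ("\nwrote %ld bytes\n", n);
  return n < 0;
}

Because each iteration converts at most one buffer's worth of data, memory use stays constant regardless of the precision or the input length.
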
diff --git a/SOURCES/glibc-rh2122501-2.patch b/SOURCES/glibc-rh2122501-2.patch
new file mode 100644
index 0000000..8cac488
--- /dev/null
+++ b/SOURCES/glibc-rh2122501-2.patch
@@ -0,0 +1,160 @@
+commit 29b12753b51866b227a6c0ac96c2c6c0e20f3497
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Thu Mar 19 18:35:46 2020 -0300
+
+    stdio: Add tests for printf multibyte conversion leak [BZ#25691]
+    
+    Checked on x86_64-linux-gnu and i686-linux-gnu.
+    
+    (cherry picked from commit 910a835dc96c1f518ac2a6179fc622ba81ffb159)
+
+diff --git a/stdio-common/Makefile b/stdio-common/Makefile
+index a10f12ab3ccbd76e..51062a7dbf698931 100644
+--- a/stdio-common/Makefile
++++ b/stdio-common/Makefile
+@@ -63,6 +63,7 @@ tests := tstscanf test_rdwr test-popen tstgetln test-fseek \
+ 	 tst-vfprintf-mbs-prec \
+ 	 tst-scanf-round \
+ 	 tst-renameat2 \
++	 tst-printf-bz25691 \
+ 
+ test-srcs = tst-unbputc tst-printf tst-printfsz-islongdouble
+ 
+@@ -71,10 +72,12 @@ tests-special += $(objpfx)tst-unbputc.out $(objpfx)tst-printf.out \
+ 		 $(objpfx)tst-printf-bz18872-mem.out \
+ 		 $(objpfx)tst-setvbuf1-cmp.out \
+ 		 $(objpfx)tst-vfprintf-width-prec-mem.out \
+-		 $(objpfx)tst-printfsz-islongdouble.out
++		 $(objpfx)tst-printfsz-islongdouble.out \
++		 $(objpfx)tst-printf-bz25691-mem.out
+ generated += tst-printf-bz18872.c tst-printf-bz18872.mtrace \
+ 	     tst-printf-bz18872-mem.out \
+-	     tst-vfprintf-width-prec.mtrace tst-vfprintf-width-prec-mem.out
++	     tst-vfprintf-width-prec.mtrace tst-vfprintf-width-prec-mem.out \
++	     tst-printf-bz25691.mtrace tst-printf-bz25691-mem.out
+ endif
+ 
+ include ../Rules
+@@ -96,6 +99,8 @@ endif
+ tst-printf-bz18872-ENV = MALLOC_TRACE=$(objpfx)tst-printf-bz18872.mtrace
+ tst-vfprintf-width-prec-ENV = \
+   MALLOC_TRACE=$(objpfx)tst-vfprintf-width-prec.mtrace
++tst-printf-bz25691-ENV = \
++  MALLOC_TRACE=$(objpfx)tst-printf-bz25691.mtrace
+ 
+ $(objpfx)tst-unbputc.out: tst-unbputc.sh $(objpfx)tst-unbputc
+ 	$(SHELL) $< $(common-objpfx) '$(test-program-prefix)' > $@; \
+diff --git a/stdio-common/tst-printf-bz25691.c b/stdio-common/tst-printf-bz25691.c
+new file mode 100644
+index 0000000000000000..37b30a3a8a7dc5e2
+--- /dev/null
++++ b/stdio-common/tst-printf-bz25691.c
+@@ -0,0 +1,108 @@
++/* Test for memory leak with large width (BZ#25691).
++   Copyright (C) 2020 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <wchar.h>
++#include <stdint.h>
++#include <locale.h>
++
++#include <mcheck.h>
++#include <support/check.h>
++#include <support/support.h>
++
++static int
++do_test (void)
++{
++  mtrace ();
++
++  /* For 's' conversion specifier with 'l' modifier the array must be
++     converted to multibyte characters up to the precision specific
++     value.  */
++  {
++    /* The input size value is to force a heap allocation on temporary
++       buffer (in the old implementation).  */
++    const size_t winputsize = 64 * 1024 + 1;
++    wchar_t *winput = xmalloc (winputsize * sizeof (wchar_t));
++    wmemset (winput, L'a', winputsize - 1);
++    winput[winputsize - 1] = L'\0';
++
++    char result[9];
++    const char expected[] = "aaaaaaaa";
++    int ret;
++
++    ret = snprintf (result, sizeof (result), "%.65537ls", winput);
++    TEST_COMPARE (ret, winputsize - 1);
++    TEST_COMPARE_BLOB (result, sizeof (result), expected, sizeof (expected));
++
++    ret = snprintf (result, sizeof (result), "%ls", winput);
++    TEST_COMPARE (ret, winputsize - 1);
++    TEST_COMPARE_BLOB (result, sizeof (result), expected, sizeof (expected));
++
++    free (winput);
++  }
++
++  /* For 's' conversion specifier the array is interpreted as a multibyte
++     character sequence and converted to wide characters up to the precision
++     specific value.  */
++  {
++    /* The input size value is to force a heap allocation on temporary
++       buffer (in the old implementation).  */
++    const size_t mbssize = 32 * 1024;
++    char *mbs = xmalloc (mbssize);
++    memset (mbs, 'a', mbssize - 1);
++    mbs[mbssize - 1] = '\0';
++
++    const size_t expectedsize = 32 * 1024;
++    wchar_t *expected = xmalloc (expectedsize * sizeof (wchar_t));
++    wmemset (expected, L'a', expectedsize - 1);
++    expected[expectedsize-1] = L'\0';
++
++    const size_t resultsize = mbssize * sizeof (wchar_t);
++    wchar_t *result = xmalloc (resultsize);
++    int ret;
++
++    ret = swprintf (result, resultsize, L"%.65537s", mbs);
++    TEST_COMPARE (ret, mbssize - 1);
++    TEST_COMPARE_BLOB (result, (ret + 1) * sizeof (wchar_t),
++		       expected, expectedsize * sizeof (wchar_t));
++
++    ret = swprintf (result, resultsize, L"%1$.65537s", mbs);
++    TEST_COMPARE (ret, mbssize - 1);
++    TEST_COMPARE_BLOB (result, (ret + 1) * sizeof (wchar_t),
++		       expected, expectedsize * sizeof (wchar_t));
++
++    /* Same test, but with an invalid multibyte sequence.  */
++    mbs[mbssize - 2] = 0xff;
++
++    ret = swprintf (result, resultsize, L"%.65537s", mbs);
++    TEST_COMPARE (ret, -1);
++
++    ret = swprintf (result, resultsize, L"%1$.65537s", mbs);
++    TEST_COMPARE (ret, -1);
++
++    free (mbs);
++    free (result);
++    free (expected);
++  }
++
++  return 0;
++}
++
++#include <support/test-driver.c>
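
The new test relies on glibc's mtrace facility: mtrace () logs every allocation to the file named by MALLOC_TRACE, and the tst-printf-bz25691-mem.out rule then checks that log for unfreed blocks. A minimal sketch of the same mechanism outside the glibc test harness (the file name and the deliberate leak are invented for illustration) looks like this:

#include <mcheck.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  mtrace ();                    /* start logging to $MALLOC_TRACE */

  char *leaked = malloc (64);   /* deliberately never freed */
  strcpy (leaked, "leak me");

  return 0;
}

/* Run roughly as:
     MALLOC_TRACE=trace.log ./a.out
     mtrace ./a.out trace.log    # reports the unfreed 64-byte block  */
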
diff --git a/SOURCES/glibc-rh2122501-3.patch b/SOURCES/glibc-rh2122501-3.patch
new file mode 100644
index 0000000..331dd92
--- /dev/null
+++ b/SOURCES/glibc-rh2122501-3.patch
@@ -0,0 +1,356 @@
+commit e1c0c00cc2bdd147bfcf362ada1443bee90465ec
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Tue Jul 7 14:54:12 2020 +0000
+
+    Remove most vfprintf width/precision-dependent allocations (bug 14231, bug 26211).
+    
+    The vfprintf implementation (used for all printf-family functions)
+    contains complicated logic to allocate internal buffers of a size
+    depending on the width and precision used for a format, using either
+    malloc or alloca depending on that size, and with consequent checks
+    for size overflow and allocation failure.
+    
+    As noted in bug 26211, the version of that logic used when '$' plus
+    argument number formats are in use is missing the overflow checks,
+    which can result in segfaults (quite possibly exploitable, I didn't
+    try to work that out) when the width or precision is in the range
+    0x7fffffe0 through 0x7fffffff (maybe smaller values as well in the
+    wprintf case on 32-bit systems, when the multiplication by sizeof
+    (CHAR_T) can overflow).
+    
+    All that complicated logic in fact appears to be useless.  As far as I
+    can tell, there has been no need (outside the floating-point printf
+    code, which does its own allocations) for allocations depending on
+    width or precision since commit
+    3e95f6602b226e0de06aaff686dc47b282d7cc16 ("Remove limitation on size
+    of precision for integers", Sun Sep 12 21:23:32 1999 +0000).  Thus,
+    this patch removes that logic completely, thereby fixing both problems
+    with excessive allocations for large width and precision for
+    non-floating-point formats, and the problem with missing overflow
+    checks with such allocations.  Note that this does have the
+    consequence that width and precision up to INT_MAX are now allowed
+    where previously INT_MAX / sizeof (CHAR_T) - EXTSIZ or more would have
+    been rejected, so could potentially expose any other overflows where
+    the value would previously have been rejected by those removed checks.
+    
+    I believe this completely fixes bugs 14231 and 26211.
+    
+    Excessive allocations are still possible in the floating-point case
+    (bug 21127), as are other integer or buffer overflows (see bug 26201).
+    This does not address the cases where a precision larger than INT_MAX
+    (embedded in the format string) would be meaningful without printf's
+    return value overflowing (when it's used with a string format, or %g
+    without the '#' flag, so the actual output will be much smaller), as
+    mentioned in bug 17829 comment 8; using size_t internally for
+    precision to handle that case would be complicated by struct
+    printf_info being a public ABI.  Nor does it address the matter of an
+    INT_MIN width being negated (bug 17829 comment 7; the same logic
+    appears a second time in the file as well, in the form of multiplying
+    by -1).  There may be other sources of memory allocations with malloc
+    in printf functions as well (bug 24988, bug 16060).  From inspection,
+    I think there are also integer overflows in two copies of "if ((width
+    -= len) < 0)" logic (where width is int, len is size_t and a very long
+    string could result in spurious padding being output on a 32-bit
+    system before printf overflows the count of output characters).
+    
+    Tested for x86-64 and x86.
+    
+    (cherry picked from commit 6caddd34bd7ffb5ac4f36c8e036eee100c2cc535)
+
+diff --git a/stdio-common/Makefile b/stdio-common/Makefile
+index 51062a7dbf698931..d76b47bd5f932f69 100644
+--- a/stdio-common/Makefile
++++ b/stdio-common/Makefile
+@@ -64,6 +64,7 @@ tests := tstscanf test_rdwr test-popen tstgetln test-fseek \
+ 	 tst-scanf-round \
+ 	 tst-renameat2 \
+ 	 tst-printf-bz25691 \
++	 tst-vfprintf-width-prec-alloc
+ 
+ test-srcs = tst-unbputc tst-printf tst-printfsz-islongdouble
+ 
+diff --git a/stdio-common/bug22.c b/stdio-common/bug22.c
+index b26399acb7dfc775..e12b01731e1b4ac8 100644
+--- a/stdio-common/bug22.c
++++ b/stdio-common/bug22.c
+@@ -42,7 +42,7 @@ do_test (void)
+ 
+   ret = fprintf (fp, "%." SN3 "d", 1);
+   printf ("ret = %d\n", ret);
+-  if (ret != -1 || errno != EOVERFLOW)
++  if (ret != N3)
+ 	  return 1;
+ 
+   ret = fprintf (fp, "%" SN2 "d%" SN2 "d", 1, 1);
+diff --git a/stdio-common/tst-vfprintf-width-prec-alloc.c b/stdio-common/tst-vfprintf-width-prec-alloc.c
+new file mode 100644
+index 0000000000000000..0a74b53a3389d699
+--- /dev/null
++++ b/stdio-common/tst-vfprintf-width-prec-alloc.c
+@@ -0,0 +1,41 @@
++/* Test large width or precision does not involve large allocation.
++   Copyright (C) 2020 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <stdio.h>
++#include <sys/resource.h>
++#include <support/check.h>
++
++char test_string[] = "test";
++
++static int
++do_test (void)
++{
++  struct rlimit limit;
++  TEST_VERIFY_EXIT (getrlimit (RLIMIT_AS, &limit) == 0);
++  limit.rlim_cur = 200 * 1024 * 1024;
++  TEST_VERIFY_EXIT (setrlimit (RLIMIT_AS, &limit) == 0);
++  FILE *fp = fopen ("/dev/null", "w");
++  TEST_VERIFY_EXIT (fp != NULL);
++  TEST_COMPARE (fprintf (fp, "%1000000000d", 1), 1000000000);
++  TEST_COMPARE (fprintf (fp, "%.1000000000s", test_string), 4);
++  TEST_COMPARE (fprintf (fp, "%1000000000d %1000000000d", 1, 2), 2000000001);
++  TEST_COMPARE (fprintf (fp, "%2$.*1$s", 0x7fffffff, test_string), 4);
++  return 0;
++}
++
++#include <support/test-driver.c>
+diff --git a/stdio-common/vfprintf.c b/stdio-common/vfprintf.c
+index dab56b6ba2c7bdbe..6b83ba91a12cdcd5 100644
+--- a/stdio-common/vfprintf.c
++++ b/stdio-common/vfprintf.c
+@@ -42,10 +42,6 @@
+ 
+ #include <libioP.h>
+ 
+-/* In some cases we need extra space for all the output which is not
+-   counted in the width of the string. We assume 32 characters is
+-   enough.  */
+-#define EXTSIZ		32
+ #define ARGCHECK(S, Format) \
+   do									      \
+     {									      \
+@@ -1295,7 +1291,6 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+ 
+   /* Buffer intermediate results.  */
+   CHAR_T work_buffer[WORK_BUFFER_SIZE];
+-  CHAR_T *workstart = NULL;
+   CHAR_T *workend;
+ 
+   /* We have to save the original argument pointer.  */
+@@ -1404,7 +1399,6 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+       UCHAR_T pad = L_(' ');/* Padding character.  */
+       CHAR_T spec;
+ 
+-      workstart = NULL;
+       workend = work_buffer + WORK_BUFFER_SIZE;
+ 
+       /* Get current character in format string.  */
+@@ -1496,31 +1490,6 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+ 	    pad = L_(' ');
+ 	    left = 1;
+ 	  }
+-
+-	if (__glibc_unlikely (width >= INT_MAX / sizeof (CHAR_T) - EXTSIZ))
+-	  {
+-	    __set_errno (EOVERFLOW);
+-	    done = -1;
+-	    goto all_done;
+-	  }
+-
+-	if (width >= WORK_BUFFER_SIZE - EXTSIZ)
+-	  {
+-	    /* We have to use a special buffer.  */
+-	    size_t needed = ((size_t) width + EXTSIZ) * sizeof (CHAR_T);
+-	    if (__libc_use_alloca (needed))
+-	      workend = (CHAR_T *) alloca (needed) + width + EXTSIZ;
+-	    else
+-	      {
+-		workstart = (CHAR_T *) malloc (needed);
+-		if (workstart == NULL)
+-		  {
+-		    done = -1;
+-		    goto all_done;
+-		  }
+-		workend = workstart + width + EXTSIZ;
+-	      }
+-	  }
+       }
+       JUMP (*f, step1_jumps);
+ 
+@@ -1528,31 +1497,13 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+     LABEL (width):
+       width = read_int (&f);
+ 
+-      if (__glibc_unlikely (width == -1
+-			    || width >= INT_MAX / sizeof (CHAR_T) - EXTSIZ))
++      if (__glibc_unlikely (width == -1))
+ 	{
+ 	  __set_errno (EOVERFLOW);
+ 	  done = -1;
+ 	  goto all_done;
+ 	}
+ 
+-      if (width >= WORK_BUFFER_SIZE - EXTSIZ)
+-	{
+-	  /* We have to use a special buffer.  */
+-	  size_t needed = ((size_t) width + EXTSIZ) * sizeof (CHAR_T);
+-	  if (__libc_use_alloca (needed))
+-	    workend = (CHAR_T *) alloca (needed) + width + EXTSIZ;
+-	  else
+-	    {
+-	      workstart = (CHAR_T *) malloc (needed);
+-	      if (workstart == NULL)
+-		{
+-		  done = -1;
+-		  goto all_done;
+-		}
+-	      workend = workstart + width + EXTSIZ;
+-	    }
+-	}
+       if (*f == L_('$'))
+ 	/* Oh, oh.  The argument comes from a positional parameter.  */
+ 	goto do_positional;
+@@ -1601,34 +1552,6 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+ 	}
+       else
+ 	prec = 0;
+-      if (prec > width && prec > WORK_BUFFER_SIZE - EXTSIZ)
+-	{
+-	  /* Deallocate any previously allocated buffer because it is
+-	     too small.  */
+-	  if (__glibc_unlikely (workstart != NULL))
+-	    free (workstart);
+-	  workstart = NULL;
+-	  if (__glibc_unlikely (prec >= INT_MAX / sizeof (CHAR_T) - EXTSIZ))
+-	    {
+-	      __set_errno (EOVERFLOW);
+-	      done = -1;
+-	      goto all_done;
+-	    }
+-	  size_t needed = ((size_t) prec + EXTSIZ) * sizeof (CHAR_T);
+-
+-	  if (__libc_use_alloca (needed))
+-	    workend = (CHAR_T *) alloca (needed) + prec + EXTSIZ;
+-	  else
+-	    {
+-	      workstart = (CHAR_T *) malloc (needed);
+-	      if (workstart == NULL)
+-		{
+-		  done = -1;
+-		  goto all_done;
+-		}
+-	      workend = workstart + prec + EXTSIZ;
+-	    }
+-	}
+       JUMP (*f, step2_jumps);
+ 
+       /* Process 'h' modifier.  There might another 'h' following.  */
+@@ -1692,10 +1615,6 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+       /* The format is correctly handled.  */
+       ++nspecs_done;
+ 
+-      if (__glibc_unlikely (workstart != NULL))
+-	free (workstart);
+-      workstart = NULL;
+-
+       /* Look for next format specifier.  */
+ #ifdef COMPILE_WPRINTF
+       f = __find_specwc ((end_of_spec = ++f));
+@@ -1713,18 +1632,11 @@ vfprintf (FILE *s, const CHAR_T *format, va_list ap)
+ 
+   /* Hand off processing for positional parameters.  */
+ do_positional:
+-  if (__glibc_unlikely (workstart != NULL))
+-    {
+-      free (workstart);
+-      workstart = NULL;
+-    }
+   done = printf_positional (s, format, readonly_format, ap, &ap_save,
+ 			    done, nspecs_done, lead_str_end, work_buffer,
+ 			    save_errno, grouping, thousands_sep);
+ 
+  all_done:
+-  if (__glibc_unlikely (workstart != NULL))
+-    free (workstart);
+   /* Unlock the stream.  */
+   _IO_funlockfile (s);
+   _IO_cleanup_region_end (0);
+@@ -1767,8 +1679,6 @@ printf_positional (FILE *s, const CHAR_T *format, int readonly_format,
+   /* Just a counter.  */
+   size_t cnt;
+ 
+-  CHAR_T *workstart = NULL;
+-
+   if (grouping == (const char *) -1)
+     {
+ #ifdef COMPILE_WPRINTF
+@@ -1957,7 +1867,6 @@ printf_positional (FILE *s, const CHAR_T *format, int readonly_format,
+       char pad = specs[nspecs_done].info.pad;
+       CHAR_T spec = specs[nspecs_done].info.spec;
+ 
+-      workstart = NULL;
+       CHAR_T *workend = work_buffer + WORK_BUFFER_SIZE;
+ 
+       /* Fill in last information.  */
+@@ -1991,27 +1900,6 @@ printf_positional (FILE *s, const CHAR_T *format, int readonly_format,
+ 	  prec = specs[nspecs_done].info.prec;
+ 	}
+ 
+-      /* Maybe the buffer is too small.  */
+-      if (MAX (prec, width) + EXTSIZ > WORK_BUFFER_SIZE)
+-	{
+-	  if (__libc_use_alloca ((MAX (prec, width) + EXTSIZ)
+-				 * sizeof (CHAR_T)))
+-	    workend = ((CHAR_T *) alloca ((MAX (prec, width) + EXTSIZ)
+-					  * sizeof (CHAR_T))
+-		       + (MAX (prec, width) + EXTSIZ));
+-	  else
+-	    {
+-	      workstart = (CHAR_T *) malloc ((MAX (prec, width) + EXTSIZ)
+-					     * sizeof (CHAR_T));
+-	      if (workstart == NULL)
+-		{
+-		  done = -1;
+-		  goto all_done;
+-		}
+-	      workend = workstart + (MAX (prec, width) + EXTSIZ);
+-	    }
+-	}
+-
+       /* Process format specifiers.  */
+       while (1)
+ 	{
+@@ -2085,18 +1973,12 @@ printf_positional (FILE *s, const CHAR_T *format, int readonly_format,
+ 	  break;
+ 	}
+ 
+-      if (__glibc_unlikely (workstart != NULL))
+-	free (workstart);
+-      workstart = NULL;
+-
+       /* Write the following constant string.  */
+       outstring (specs[nspecs_done].end_of_fmt,
+ 		 specs[nspecs_done].next_fmt
+ 		 - specs[nspecs_done].end_of_fmt);
+     }
+  all_done:
+-  if (__glibc_unlikely (workstart != NULL))
+-    free (workstart);
+   scratch_buffer_free (&argsbuf);
+   scratch_buffer_free (&specsbuf);
+   return done;
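
The missing overflow checks described above lived in the '$' positional-parameter path, where width and precision can be supplied as numbered arguments. For readers unfamiliar with that syntax, the fragment below (a plain demonstration, not glibc test code) uses the same %2$.*1$s form as the new test: the precision comes from argument 1 and the string from argument 2, and with the width/precision allocation logic removed even an INT_MAX precision costs nothing:

#include <stdio.h>

int
main (void)
{
  const char *s = "test";

  /* Precision is taken from argument 1 (0x7fffffff), the string from
     argument 2; the output is simply "test" because the precision only
     caps the length.  */
  int n = printf ("%2$.*1$s\n", 0x7fffffff, s);
  printf ("printf returned %d\n", n);
  return 0;
}
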
diff --git a/SOURCES/glibc-rh2122501-4.patch b/SOURCES/glibc-rh2122501-4.patch
new file mode 100644
index 0000000..97436f4
--- /dev/null
+++ b/SOURCES/glibc-rh2122501-4.patch
@@ -0,0 +1,86 @@
+commit 211a30a92b72a18ea4caa35ed503b70bc644923e
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Nov 8 19:11:51 2021 +0000
+
+    Fix memmove call in vfprintf-internal.c:group_number
+    
+    A recent GCC mainline change introduces errors of the form:
+    
+    vfprintf-internal.c: In function 'group_number':
+    vfprintf-internal.c:2093:15: error: 'memmove' specified bound between 9223372036854775808 and 18446744073709551615 exceeds maximum object size 9223372036854775807 [-Werror=stringop-overflow=]
+     2093 |               memmove (w, s, (front_ptr -s) * sizeof (CHAR_T));
+          |               ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    
+    This is a genuine bug in the glibc code: s > front_ptr is always true
+    at this point in the code, and the intent is clearly for the
+    subtraction to be the other way round.  The other arguments to the
+    memmove call here also appear to be wrong; w and s point just *after*
+    the destination and source for copying the rest of the number, so the
+    size needs to be subtracted to get appropriate pointers for the
+    copying.  Adjust the memmove call to conform to the apparent intent of
+    the code, so fixing the -Wstringop-overflow error.
+    
+    Now, if the original code were ever executed, a buffer overrun would
+    result.  However, I believe this code (introduced in commit
+    edc1686af0c0fc2eb535f1d38cdf63c1a5a03675, "vfprintf: Reuse work_buffer
+    in group_number", so in glibc 2.26) is unreachable in prior glibc
+    releases (so there is no need for a bug in Bugzilla, no need to
+    consider any backports unless someone wants to build older glibc
+    releases with GCC 12 and no possibility of this buffer overrun
+    resulting in a security issue).
+    
+    work_buffer is 1000 bytes / 250 wide characters.  This case is only
+    reachable if an initial part of the number, plus a grouped copy of the
+    rest of the number, fail to fit in that space; that is, if the grouped
+    number fails to fit in the space.  In the wide character case,
+    grouping is always one wide character, so even with a locale (of which
+    there aren't any in glibc) grouping every digit, a number would need
+    to occupy at least 125 wide characters to overflow, and a 64-bit
+    integer occupies at most 23 characters in octal including a leading 0.
+    In the narrow character case, the multibyte encoding of the grouping
+    separator would need to be at least 42 bytes to overflow, again
+    supposing grouping every digit, but MB_LEN_MAX is 16.  So even if we
+    admit the case of artificially constructed locales not shipped with
+    glibc, given that such a locale would need to use one of the character
+    sets supported by glibc, this code cannot be reached at present.  (And
+    POSIX only actually specifies the ' flag for grouping for decimal
+    output, though glibc acts on it for other bases as well.)
+    
+    With binary output (if you consider use of grouping there to be
+    valid), you'd need a 15-byte multibyte character for overflow; I don't
+    know if any supported character set has such a character (if, again,
+    we admit constructed locales using grouping every digit and a grouping
+    separator chosen to have a multibyte encoding as long as possible, as
+    well as accepting use of grouping with binary), but given that we have
+    this code at all (clearly it's not *correct*, or in accordance with
+    the principle of avoiding arbitrary limits, to skip grouping on
+    running out of internal space like that), I don't think it should need
+    any further changes for binary printf support to go in.
+    
+    On the other hand, support for large sizes of _BitInt in printf (see
+    the N2858 proposal) *would* require something to be done about such
+    arbitrary limits (presumably using dynamic allocation in printf again,
+    for sufficiently large _BitInt arguments only - currently only
+    floating-point uses dynamic allocation, and, as previously discussed,
+    that could actually be replaced by bounded allocation given smarter
+    code).
+    
+    Tested with build-many-glibcs.py for aarch64-linux-gnu (GCC mainline).
+    Also tested natively for x86_64.
+    
+    (cherry picked from commit db6c4935fae6005d46af413b32aa92f4f6059dce)
+
+diff --git a/stdio-common/vfprintf.c b/stdio-common/vfprintf.c
+index 6b83ba91a12cdcd5..2d434ba45a67911e 100644
+--- a/stdio-common/vfprintf.c
++++ b/stdio-common/vfprintf.c
+@@ -2101,7 +2101,8 @@ group_number (CHAR_T *front_ptr, CHAR_T *w, CHAR_T *rear_ptr,
+ 	    copy_rest:
+ 	      /* No further grouping to be done.  Copy the rest of the
+ 		 number.  */
+-	      memmove (w, s, (front_ptr -s) * sizeof (CHAR_T));
++	      w -= s - front_ptr;
++	      memmove (w, front_ptr, (s - front_ptr) * sizeof (CHAR_T));
+ 	      break;
+ 	    }
+ 	  else if (*grouping != '\0')
diff --git a/SOURCES/glibc-rh2122501-5.patch b/SOURCES/glibc-rh2122501-5.patch
new file mode 100644
index 0000000..f088e5f
--- /dev/null
+++ b/SOURCES/glibc-rh2122501-5.patch
@@ -0,0 +1,81 @@
+commit 8b915921fbf4d32bf68fc3d637413cf96236b3fd
+Author: Andreas Schwab <schwab@suse.de>
+Date:   Mon Aug 29 15:05:40 2022 +0200
+
+    Add test for bug 29530
+    
+    This tests for a bug that was introduced in commit edc1686af0 ("vfprintf:
+    Reuse work_buffer in group_number") and fixed as a side effect of commit
+    6caddd34bd ("Remove most vfprintf width/precision-dependent allocations
+    (bug 14231, bug 26211).").
+    
+    (cherry picked from commit ca6466e8be32369a658035d69542d47603e58a99)
+
+diff --git a/stdio-common/Makefile b/stdio-common/Makefile
+index d76b47bd5f932f69..ac61093660ef9063 100644
+--- a/stdio-common/Makefile
++++ b/stdio-common/Makefile
+@@ -64,7 +64,9 @@ tests := tstscanf test_rdwr test-popen tstgetln test-fseek \
+ 	 tst-scanf-round \
+ 	 tst-renameat2 \
+ 	 tst-printf-bz25691 \
+-	 tst-vfprintf-width-prec-alloc
++	 tst-vfprintf-width-prec-alloc \
++	 tst-grouping2 \
++  # tests
+ 
+ test-srcs = tst-unbputc tst-printf tst-printfsz-islongdouble
+ 
+@@ -91,6 +93,7 @@ $(objpfx)bug14.out: $(gen-locales)
+ $(objpfx)scanf13.out: $(gen-locales)
+ $(objpfx)test-vfprintf.out: $(gen-locales)
+ $(objpfx)tst-grouping.out: $(gen-locales)
++$(objpfx)tst-grouping2.out: $(gen-locales)
+ $(objpfx)tst-sprintf.out: $(gen-locales)
+ $(objpfx)tst-sscanf.out: $(gen-locales)
+ $(objpfx)tst-swprintf.out: $(gen-locales)
+diff --git a/stdio-common/tst-grouping2.c b/stdio-common/tst-grouping2.c
+new file mode 100644
+index 0000000000000000..3024c942a60e51bf
+--- /dev/null
++++ b/stdio-common/tst-grouping2.c
+@@ -0,0 +1,39 @@
++/* Test printf with grouping and large width (bug 29530)
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <locale.h>
++#include <stdio.h>
++#include <support/check.h>
++#include <support/support.h>
++
++static int
++do_test (void)
++{
++  const int field_width = 1000;
++  char buf[field_width + 1];
++
++  xsetlocale (LC_NUMERIC, "de_DE.UTF-8");
++
++  /* This used to crash in group_number.  */
++  TEST_COMPARE (sprintf (buf, "%'*d", field_width, 1000), field_width);
++  TEST_COMPARE_STRING (buf + field_width - 6, " 1.000");
++
++  return 0;
++}
++
++#include <support/test-driver.c>
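
The tst-grouping2 test above exercises the copy_rest path whose memmove call was corrected two patches earlier (glibc-rh2122501-4.patch). The toy program below isolates that corrected pointer arithmetic; it uses ordinary chars instead of CHAR_T and invented buffer sizes. Both w and s point one past their buffers, so the destination must be stepped back by the length before copying from front_ptr:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  char src[] = "1234567";                /* digits still to be copied */
  char *front_ptr = src;
  char *s = src + sizeof (src) - 1;      /* one past the last digit */

  char out[32];
  char *w = out + sizeof (out);          /* one past the destination end */

  /* Fixed form: step W back by the length, then copy from FRONT_PTR.  */
  w -= s - front_ptr;
  memmove (w, front_ptr, (s - front_ptr) * sizeof (char));

  printf ("%.*s\n", (int) (s - front_ptr), w);   /* prints 1234567 */
  return 0;
}
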
diff --git a/SOURCES/glibc-rh2125222.patch b/SOURCES/glibc-rh2125222.patch
new file mode 100644
index 0000000..7ee14f4
--- /dev/null
+++ b/SOURCES/glibc-rh2125222.patch
@@ -0,0 +1,54 @@
+commit a23820f6052a740246fdc7dcd9c43ce8eed0c45a
+Author: Javier Pello <devel@otheo.eu>
+Date:   Mon Sep 5 20:09:01 2022 +0200
+
+    elf: Fix hwcaps string size overestimation
+    
+    Commit dad90d528259b669342757c37dedefa8577e2636 added glibc-hwcaps
+    support for LD_LIBRARY_PATH and, for this, it adjusted the total
+    string size required in _dl_important_hwcaps. However, in doing so
+    it inadvertently altered the calculation of the size required for
+    the power set strings, as the computation of the power set string
+    size depended on the first value assigned to the total variable,
+    which is later shifted, resulting in overallocation of string
+    space. Fix this now by using a different variable to hold the
+    string size required for glibc-hwcaps.
+    
+    Signed-off-by: Javier Pello <devel@otheo.eu>
+
+diff --git a/elf/dl-hwcaps.c b/elf/dl-hwcaps.c
+index 2fc4ae67a0f5d051..7ac27fd689187edc 100644
+--- a/elf/dl-hwcaps.c
++++ b/elf/dl-hwcaps.c
+@@ -193,7 +193,7 @@ _dl_important_hwcaps (const char *glibc_hwcaps_prepend,
+   /* Each hwcaps subdirectory has a GLIBC_HWCAPS_PREFIX string prefix
+      and a "/" suffix once stored in the result.  */
+   hwcaps_counts.maximum_length += strlen (GLIBC_HWCAPS_PREFIX) + 1;
+-  size_t total = (hwcaps_counts.count * (strlen (GLIBC_HWCAPS_PREFIX) + 1)
++  size_t hwcaps_sz = (hwcaps_counts.count * (strlen (GLIBC_HWCAPS_PREFIX) + 1)
+ 		  + hwcaps_counts.total_length);
+ 
+   /* Count the number of bits set in the masked value.  */
+@@ -229,11 +229,12 @@ _dl_important_hwcaps (const char *glibc_hwcaps_prepend,
+   assert (m == cnt);
+ 
+   /* Determine the total size of all strings together.  */
++  size_t total;
+   if (cnt == 1)
+-    total += temp[0].len + 1;
++    total = temp[0].len + 1;
+   else
+     {
+-      total += temp[0].len + temp[cnt - 1].len + 2;
++      total = temp[0].len + temp[cnt - 1].len + 2;
+       if (cnt > 2)
+ 	{
+ 	  total <<= 1;
+@@ -255,6 +256,7 @@ _dl_important_hwcaps (const char *glibc_hwcaps_prepend,
+   /* This is the overall result, including both glibc-hwcaps
+      subdirectories and the legacy hwcaps subdirectories using the
+      power set construction.  */
++  total += hwcaps_sz;
+   struct r_strlenpair *overall_result
+     = malloc (*sz * sizeof (*result) + total);
+   if (overall_result == NULL)
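
The overallocation described above comes from folding the glibc-hwcaps string size into total before the power-set doubling (total <<= 1) runs. The numbers below are invented and only mirror the single doubling visible in the hunk, ignoring the rest of the power-set accumulation, but they show how the old formula wastes space:

#include <stdio.h>

int
main (void)
{
  size_t hwcaps_sz = 100;      /* bytes for the glibc-hwcaps strings */
  size_t first = 8, last = 6;  /* first/last legacy hwcap string lengths */

  /* Old computation: hwcaps_sz was already inside 'total' when the
     power-set doubling ran, so it was doubled as well.  */
  size_t old_total = (hwcaps_sz + first + last + 2) << 1;

  /* Fixed computation: double only the power-set part and add the
     glibc-hwcaps size once at the end.  */
  size_t new_total = ((first + last + 2) << 1) + hwcaps_sz;

  printf ("old=%zu new=%zu wasted=%zu\n",
          old_total, new_total, old_total - new_total);
  return 0;
}
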
diff --git a/SOURCES/glibc-rh2139875-1.patch b/SOURCES/glibc-rh2139875-1.patch
new file mode 100644
index 0000000..32091ab
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-1.patch
@@ -0,0 +1,32 @@
+commit acb55dcb892d4321ada6fd9b663b28fada432682
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Wed Jan 2 18:35:50 2019 +0000
+
+    Update Linux kernel version in tst-mman-consts.py.
+    
+    This patch updates the Linux kernel version in tst-mman-consts.py to
+    4.20 (meaning that's the version for which glibc is expected to have
+    the same constants as the kernel, up to the exceptions listed in the
+    test).  (Once we have more such tests sharing common infrastructure, I
+    expect the kernel version will be something set in the infrastructure
+    shared by all such tests, rather than something needing updating
+    separately for each test for each new kernel version.)
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Expect
+            constants to match with Linux 4.20.
+
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 1a613beec0da16fb..4a2ddd49c4c7282b 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (4, 19)
++    linux_version_glibc = (4, 20)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
diff --git a/SOURCES/glibc-rh2139875-2.patch b/SOURCES/glibc-rh2139875-2.patch
new file mode 100644
index 0000000..1c3ac5b
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-2.patch
@@ -0,0 +1,31 @@
+commit c7a26cba2ab949216ac9ef245ca78696815ea4c4
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Fri Aug 2 11:36:07 2019 +0000
+
+    Update Linux kernel version number in tst-mman-consts.py to 5.2.
+    
+    The tst-mman-consts.py test includes a kernel version number, to avoid
+    failures because of newly added constants in the kernel (if kernel
+    headers are newer than this version of glibc) or missing constants in
+    the kernel (if kernel headers are older than this version of glibc).
+    This patch updates it to 5.2 to reflect that the MAP_* constants in
+    glibc are still current as of that kernel version.
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Update Linux
+            kernel version number to 5.2.
+
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 4a2ddd49c4c7282b..9e326b1f31799a72 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (4, 20)
++    linux_version_glibc = (5, 2)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
diff --git a/SOURCES/glibc-rh2139875-3.patch b/SOURCES/glibc-rh2139875-3.patch
new file mode 100644
index 0000000..6c48115
--- /dev/null
+++ b/SOURCES/glibc-rh2139875-3.patch
@@ -0,0 +1,61 @@
+commit 71bdf29ac1de04efcce96bc5ce50af3263851ac7
+Author: Joseph Myers <joseph@codesourcery.com>
+Date:   Mon Sep 30 15:49:25 2019 +0000
+
+    Update bits/mman.h constants and tst-mman-consts.py for Linux 5.3.
+    
+    The Linux 5.3 uapi headers have some rearrangement relating to MAP_*
+    constants, which includes the effect of adding definitions of MAP_SYNC
+    on powerpc and sparc.  This patch updates the corresponding glibc
+    bits/mman.h headers accordingly, and updates the Linux kernel version
+    number in tst-mman-consts.py to reflect that these constants are now
+    current with that kernel version.
+    
+    Tested with build-many-glibcs.py.
+    
+            * sysdeps/unix/sysv/linux/powerpc/bits/mman.h [__USE_MISC]
+            (MAP_SYNC): New macro.
+            * sysdeps/unix/sysv/linux/sparc/bits/mman.h [__USE_MISC]
+            (MAP_SYNC): Likewise.
+            * sysdeps/unix/sysv/linux/tst-mman-consts.py (main): Update Linux
+            kernel version number to 5.3.
+
+diff --git a/sysdeps/unix/sysv/linux/powerpc/bits/mman.h b/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
+index e652467c8c091381..0e7fa647793ed585 100644
+--- a/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
++++ b/sysdeps/unix/sysv/linux/powerpc/bits/mman.h
+@@ -36,6 +36,8 @@
+ # define MAP_NONBLOCK	0x10000		/* Do not block on IO.  */
+ # define MAP_STACK	0x20000		/* Allocation is for a stack.  */
+ # define MAP_HUGETLB	0x40000		/* Create huge page mapping.  */
++# define MAP_SYNC	0x80000		/* Perform synchronous page
++					   faults for the mapping.  */
+ # define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED but do not unmap
+ 					   underlying mapping.  */
+ #endif
+diff --git a/sysdeps/unix/sysv/linux/sparc/bits/mman.h b/sysdeps/unix/sysv/linux/sparc/bits/mman.h
+index 3a3ffb994631e2b6..03f6f732bb5efbe2 100644
+--- a/sysdeps/unix/sysv/linux/sparc/bits/mman.h
++++ b/sysdeps/unix/sysv/linux/sparc/bits/mman.h
+@@ -36,6 +36,8 @@
+ # define MAP_NONBLOCK	0x10000		/* Do not block on IO.  */
+ # define MAP_STACK	0x20000		/* Allocation is for a stack.  */
+ # define MAP_HUGETLB	0x40000		/* Create huge page mapping.  */
++# define MAP_SYNC	0x80000		/* Perform synchronous page
++					   faults for the mapping.  */
+ # define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED but do not unmap
+ 					   underlying mapping.  */
+ #endif
+diff --git a/sysdeps/unix/sysv/linux/tst-mman-consts.py b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+index 9e326b1f31799a72..42914e4e0ba84712 100644
+--- a/sysdeps/unix/sysv/linux/tst-mman-consts.py
++++ b/sysdeps/unix/sysv/linux/tst-mman-consts.py
+@@ -41,7 +41,7 @@ def main():
+                         help='C compiler (including options) to use')
+     args = parser.parse_args()
+     linux_version_headers = linux_kernel_version(args.cc)
+-    linux_version_glibc = (5, 2)
++    linux_version_glibc = (5, 3)
+     sys.exit(glibcextract.compare_macro_consts(
+         '#define _GNU_SOURCE 1\n'
+         '#include <sys/mman.h>\n',
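
MAP_SYNC, whose definition the patch adds for powerpc and sparc, requests synchronous page faults so that stores to a DAX (persistent-memory) mapping are durable without an explicit msync. The usage sketch below is a hedged illustration rather than anything from the patch: the path is arbitrary, the flag is only accepted together with MAP_SHARED_VALIDATE, and on a non-DAX filesystem the kernel is expected to reject it with EOPNOTSUPP, so the program falls back to a plain shared mapping:

#define _GNU_SOURCE 1
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (int argc, char **argv)
{
  const char *path = argc > 1 ? argv[1] : "/tmp/map-sync-demo";
  int fd = open (path, O_RDWR | O_CREAT, 0600);
  if (fd < 0 || ftruncate (fd, 4096) != 0)
    {
      perror ("open/ftruncate");
      return 1;
    }

  void *p = mmap (NULL, 4096, PROT_READ | PROT_WRITE,
                  MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
  if (p == MAP_FAILED)
    {
      printf ("MAP_SYNC unavailable here: %s\n", strerror (errno));
      p = mmap (NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }
  if (p == MAP_FAILED)
    {
      perror ("mmap");
      return 1;
    }

  memcpy (p, "persistent", 10);
  munmap (p, 4096);
  close (fd);
  return 0;
}
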
diff --git a/SOURCES/glibc-rh2141989.patch b/SOURCES/glibc-rh2141989.patch
new file mode 100644
index 0000000..70ab8e4
--- /dev/null
+++ b/SOURCES/glibc-rh2141989.patch
@@ -0,0 +1,101 @@
+This change is equivalent to this upstream change:
+
+commit 22a46dee24351fd5f4f188ad80554cad79c82524
+Author: Florian Weimer <fweimer@redhat.com>
+Date:   Tue Nov 8 14:15:02 2022 +0100
+
+    Linux: Support __IPC_64 in sysvctl *ctl command arguments (bug 29771)
+
+    Old applications pass __IPC_64 as part of the command argument because
+    old glibc did not check for unknown commands, and passed through the
+    arguments directly to the kernel, without adding __IPC_64.
+    Applications need to continue doing that for old glibc compatibility,
+    so this commit enables this approach in current glibc.
+
+    For msgctl and shmctl, if no translation is required, make
+    direct system calls, as we did before the time64 changes.  If
+    translation is required, mask __IPC_64 from the command argument.
+
+    For semctl, the union-in-vararg argument handling means that
+    translation is needed on all architectures.
+
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+The downstream versions of shmctl and msgctl did not produce
+errors because they lacked a -1 error return path.  There is no
+translation requirement downstream on any architecture, so we
+can remove the switch from shmctl and msgctl.
+
+For semctl, we have to do the varargs translation, so this patch adds
+the same masking as the upstream commit.
+
+diff --git a/sysdeps/unix/sysv/linux/msgctl.c b/sysdeps/unix/sysv/linux/msgctl.c
+index 3362f4562f58f28b..7280cba31a8815a2 100644
+--- a/sysdeps/unix/sysv/linux/msgctl.c
++++ b/sysdeps/unix/sysv/linux/msgctl.c
+@@ -29,20 +29,6 @@
+ int
+ __new_msgctl (int msqid, int cmd, struct msqid_ds *buf)
+ {
+-  switch (cmd)
+-    {
+-    case IPC_RMID:
+-    case IPC_SET:
+-    case IPC_STAT:
+-    case MSG_STAT:
+-    case MSG_STAT_ANY:
+-    case IPC_INFO:
+-    case MSG_INFO:
+-      break;
+-    default:
+-      __set_errno (EINVAL);
+-      return -1;
+-    }
+ #ifdef __ASSUME_DIRECT_SYSVIPC_SYSCALLS
+   return INLINE_SYSCALL_CALL (msgctl, msqid, cmd | __IPC_64, buf);
+ #else
+diff --git a/sysdeps/unix/sysv/linux/semctl.c b/sysdeps/unix/sysv/linux/semctl.c
+index 03c56c69a5412c82..16d3f04fadd039ab 100644
+--- a/sysdeps/unix/sysv/linux/semctl.c
++++ b/sysdeps/unix/sysv/linux/semctl.c
+@@ -42,6 +42,13 @@ __new_semctl (int semid, int semnum, int cmd, ...)
+   union semun arg = { 0 };
+   va_list ap;
+ 
++  /* Some applications pass the __IPC_64 flag in cmd, to invoke
++     previously unsupported commands back when there was no EINVAL
++     error checking in glibc.  Mask the flag for the switch statements
++     below.  The actual system call made later in this function adds
++     the __IPC_64 flag back.  */
++  cmd &= ~__IPC_64;
++
+   /* Get the argument only if required.  */
+   switch (cmd)
+     {
+diff --git a/sysdeps/unix/sysv/linux/shmctl.c b/sysdeps/unix/sysv/linux/shmctl.c
+index 00768bc47614f9aa..25c5152944a6fcf3 100644
+--- a/sysdeps/unix/sysv/linux/shmctl.c
++++ b/sysdeps/unix/sysv/linux/shmctl.c
+@@ -33,22 +33,6 @@
+ int
+ __new_shmctl (int shmid, int cmd, struct shmid_ds *buf)
+ {
+-  switch (cmd)
+-    {
+-    case IPC_RMID:
+-    case SHM_LOCK:
+-    case SHM_UNLOCK:
+-    case IPC_SET:
+-    case IPC_STAT:
+-    case SHM_STAT:
+-    case SHM_STAT_ANY:
+-    case IPC_INFO:
+-    case SHM_INFO:
+-      break;
+-    default:
+-      __set_errno (EINVAL);
+-      break;
+-    }
+ #ifdef __ASSUME_DIRECT_SYSVIPC_SYSCALLS
+   return INLINE_SYSCALL_CALL (shmctl, shmid, cmd | __IPC_64, buf);
+ #else
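
The compatibility behaviour restored above can be seen from a caller's point of view with the sketch below. The 0x100 value stands in for the kernel's historical IPC_64 flag, which old binaries OR'ed into the command themselves; that value is an assumption used only for illustration and is not exposed by the public headers. With the masking in place, such a call succeeds again instead of failing with EINVAL:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* glibc leaves union semun to be defined by the caller.  */
union semun
{
  int val;
  struct semid_ds *buf;
  unsigned short *array;
};

#define OLD_KERNEL_IPC_64 0x100   /* assumed historical kernel flag value */

int
main (void)
{
  int id = semget (IPC_PRIVATE, 1, IPC_CREAT | 0600);
  if (id < 0)
    {
      perror ("semget");
      return 1;
    }

  struct semid_ds ds;
  union semun arg = { .buf = &ds };

  /* Old applications passed the kernel flag themselves; the fix masks
     it out instead of rejecting the command.  */
  int r = semctl (id, 0, IPC_STAT | OLD_KERNEL_IPC_64, arg);
  printf ("semctl (IPC_STAT | 0x100) returned %d\n", r);

  semctl (id, 0, IPC_RMID);
  return r < 0;
}
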
diff --git a/SOURCES/glibc-rh2142937-1.patch b/SOURCES/glibc-rh2142937-1.patch
new file mode 100644
index 0000000..c9279a7
--- /dev/null
+++ b/SOURCES/glibc-rh2142937-1.patch
@@ -0,0 +1,354 @@
+commit 2fe64148a81f0d78050c302f34a6853d21f7cae4
+Author: DJ Delorie <dj@redhat.com>
+Date:   Mon Mar 28 23:53:33 2022 -0400
+
+    Allow for unprivileged nested containers
+    
+    If the build itself is run in a container, we may not be able to
+    fully set up a nested container for test-container testing.
+    Notably is the mounting of /proc, since it's critical that it
+    be mounted from within the same PID namespace as its users, and
+    thus cannot be bind mounted from outside the container like other
+    mounts.
+    
+    This patch defaults to using the parent's PID namespace instead of
+    creating a new one, as this is more likely to be allowed.
+    
+    If the test needs an isolated PID namespace, it should add the "pidns"
+    command to its init script.
+    
+    Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
+Conflicts:
+	nss/tst-reload2.c
+          (not in RHEL-8)
+	support/Makefile
+          (RHEL-8 missing some routines in libsupport-routines)
+
+diff --git a/elf/tst-pldd.c b/elf/tst-pldd.c
+index f381cb0fa7e6b93d..45ac033a0f897088 100644
+--- a/elf/tst-pldd.c
++++ b/elf/tst-pldd.c
+@@ -85,6 +85,8 @@ in_str_list (const char *libname, const char *const strlist[])
+ static int
+ do_test (void)
+ {
++  support_need_proc ("needs /proc/sys/kernel/yama/ptrace_scope and /proc/$child");
++
+   /* Check if our subprocess can be debugged with ptrace.  */
+   {
+     int ptrace_scope = support_ptrace_scope ();
+diff --git a/nptl/tst-pthread-getattr.c b/nptl/tst-pthread-getattr.c
+index 273b6073abe9cb60..f1c0b39f3a27724c 100644
+--- a/nptl/tst-pthread-getattr.c
++++ b/nptl/tst-pthread-getattr.c
+@@ -28,6 +28,8 @@
+ #include <unistd.h>
+ #include <inttypes.h>
+ 
++#include <support/support.h>
++
+ /* There is an obscure bug in the kernel due to which RLIMIT_STACK is sometimes
+    returned as unlimited when it is not, which may cause this test to fail.
+    There is also the other case where RLIMIT_STACK is intentionally set as
+@@ -152,6 +154,8 @@ check_stack_top (void)
+ static int
+ do_test (void)
+ {
++  support_need_proc ("Reads /proc/self/maps to get stack size.");
++
+   pagesize = sysconf (_SC_PAGESIZE);
+   return check_stack_top ();
+ }
+diff --git a/support/Makefile b/support/Makefile
+index 636d69c4f8e7e139..e184fccbe7d2310c 100644
+--- a/support/Makefile
++++ b/support/Makefile
+@@ -59,6 +59,7 @@ libsupport-routines = \
+   support_format_hostent \
+   support_format_netent \
+   support_isolate_in_subprocess \
++  support_need_proc \
+   support_process_state \
+   support_ptrace \
+   support_openpty \
+diff --git a/support/support.h b/support/support.h
+index 96833bd4e992e6d3..1466eb29f840fa59 100644
+--- a/support/support.h
++++ b/support/support.h
+@@ -81,6 +81,11 @@ char *support_quote_string (const char *);
+    regular file open for writing, and initially empty.  */
+ int support_descriptor_supports_holes (int fd);
+ 
++/* Predicates that a test requires a working /proc filesystem.  This
++   call will exit with UNSUPPORTED if /proc is not available, printing
++   WHY_MSG as part of the diagnostic.  */
++void support_need_proc (const char *why_msg);
++
+ /* Error-checking wrapper functions which terminate the process on
+    error.  */
+ 
+diff --git a/support/support_need_proc.c b/support/support_need_proc.c
+new file mode 100644
+index 0000000000000000..9b4eab7539b2d6c3
+--- /dev/null
++++ b/support/support_need_proc.c
+@@ -0,0 +1,35 @@
++/* Indicate that a test requires a working /proc.
++   Copyright (C) 2022 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, see
++   <https://www.gnu.org/licenses/>.  */
++
++#include <unistd.h>
++#include <support/check.h>
++#include <support/support.h>
++
++/* We test for /proc/self/maps since that is one of the files that one
++   of our tests actually uses, but the general idea is to check whether
++   Linux's /proc/ (procfs) filesystem is mounted.  If not, the process
++   exits with an UNSUPPORTED result code.  */
++
++void
++support_need_proc (const char *why_msg)
++{
++#ifdef __linux__
++  if (access ("/proc/self/maps", R_OK))
++    FAIL_UNSUPPORTED ("/proc is not available, %s", why_msg);
++#endif
++}
+diff --git a/support/test-container.c b/support/test-container.c
+index 9975c8cb7bc9a955..2bce4db841ff7668 100644
+--- a/support/test-container.c
++++ b/support/test-container.c
+@@ -95,6 +95,7 @@ int verbose = 0;
+    * mytest.root/mytest.script has a list of "commands" to run:
+        syntax:
+          # comment
++	 pidns <comment>
+          su
+          mv FILE FILE
+ 	 cp FILE FILE
+@@ -120,6 +121,8 @@ int verbose = 0;
+ 
+        details:
+          - '#': A comment.
++	 - 'pidns': Require a separate PID namespace; the comment is printed
++	    if one cannot be created (default is a shared pid namespace)
+          - 'su': Enables running test as root in the container.
+          - 'mv': A minimal move files command.
+          - 'cp': A minimal copy files command.
+@@ -143,7 +146,7 @@ int verbose = 0;
+    * Simple, easy to review code (i.e. prefer simple naive code over
+      complex efficient code)
+ 
+-   * The current implementation ist parallel-make-safe, but only in
++   * The current implementation is parallel-make-safe, but only in
+      that it uses a lock to prevent parallel access to the testroot.  */
+ 
+ 
+@@ -222,11 +225,37 @@ concat (const char *str, ...)
+   return bufs[n];
+ }
+ 
++/* Like the above, but put spaces between words.  Caller frees.  */
++static char *
++concat_words (char **words, int num_words)
++{
++  int len = 0;
++  int i;
++  char *rv, *p;
++
++  for (i = 0; i < num_words; i ++)
++    {
++      len += strlen (words[i]);
++      len ++;
++    }
++
++  p = rv = (char *) xmalloc (len);
++
++  for (i = 0; i < num_words; i ++)
++    {
++      if (i > 0)
++	p = stpcpy (p, " ");
++      p = stpcpy (p, words[i]);
++    }
++
++  return rv;
++}
++
+ /* Try to mount SRC onto DEST.  */
+ static void
+ trymount (const char *src, const char *dest)
+ {
+-  if (mount (src, dest, "", MS_BIND, NULL) < 0)
++  if (mount (src, dest, "", MS_BIND | MS_REC, NULL) < 0)
+     FAIL_EXIT1 ("can't mount %s onto %s\n", src, dest);
+ }
+ 
+@@ -709,6 +738,9 @@ main (int argc, char **argv)
+   gid_t original_gid;
+   /* If set, the test runs as root instead of the user running the testsuite.  */
+   int be_su = 0;
++  int require_pidns = 0;
++  const char *pidns_comment = NULL;
++  int do_proc_mounts = 0;
+   int UMAP;
+   int GMAP;
+   /* Used for "%lld %lld 1" so need not be large.  */
+@@ -991,6 +1023,12 @@ main (int argc, char **argv)
+ 	      {
+ 		be_su = 1;
+ 	      }
++	    else if (nt >= 1 && strcmp (the_words[0], "pidns") == 0)
++	      {
++		require_pidns = 1;
++		if (nt > 1)
++		  pidns_comment = concat_words (the_words + 1, nt - 1);
++	      }
+ 	    else if (nt == 3 && strcmp (the_words[0], "mkdirp") == 0)
+ 	      {
+ 		long int m;
+@@ -1048,7 +1086,8 @@ main (int argc, char **argv)
+ 
+ #ifdef CLONE_NEWNS
+   /* The unshare here gives us our own spaces and capabilities.  */
+-  if (unshare (CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS) < 0)
++  if (unshare (CLONE_NEWUSER | CLONE_NEWNS
++	       | (require_pidns ? CLONE_NEWPID : 0)) < 0)
+     {
+       /* Older kernels may not support all the options, or security
+ 	 policy may block this call.  */
+@@ -1059,6 +1098,11 @@ main (int argc, char **argv)
+ 	    check_for_unshare_hints ();
+ 	  FAIL_UNSUPPORTED ("unable to unshare user/fs: %s", strerror (saved_errno));
+ 	}
++      /* We're about to exit anyway, so it's "safe" to call unshare again
++	 just to see if the CLONE_NEWPID caused the error.  */
++      else if (require_pidns && unshare (CLONE_NEWUSER | CLONE_NEWNS) >= 0)
++	FAIL_EXIT1 ("unable to unshare pid ns: %s : %s", strerror (errno),
++		    pidns_comment ? pidns_comment : "required by test");
+       else
+ 	FAIL_EXIT1 ("unable to unshare user/fs: %s", strerror (errno));
+     }
+@@ -1074,6 +1118,15 @@ main (int argc, char **argv)
+   trymount (support_srcdir_root, new_srcdir_path);
+   trymount (support_objdir_root, new_objdir_path);
+ 
++  /* It may not be possible to mount /proc directly.  */
++  if (! require_pidns)
++  {
++    char *new_proc = concat (new_root_path, "/proc", NULL);
++    xmkdirp (new_proc, 0755);
++    trymount ("/proc", new_proc);
++    do_proc_mounts = 1;
++  }
++
+   xmkdirp (concat (new_root_path, "/dev", NULL), 0755);
+   devmount (new_root_path, "null");
+   devmount (new_root_path, "zero");
+@@ -1136,42 +1189,60 @@ main (int argc, char **argv)
+ 
+   maybe_xmkdir ("/tmp", 0755);
+ 
+-  /* Now that we're pid 1 (effectively "root") we can mount /proc  */
+-  maybe_xmkdir ("/proc", 0777);
+-  if (mount ("proc", "/proc", "proc", 0, NULL) < 0)
+-    FAIL_EXIT1 ("Unable to mount /proc: ");
+-
+-  /* We map our original UID to the same UID in the container so we
+-     can own our own files normally.  */
+-  UMAP = open ("/proc/self/uid_map", O_WRONLY);
+-  if (UMAP < 0)
+-    FAIL_EXIT1 ("can't write to /proc/self/uid_map\n");
+-
+-  sprintf (tmp, "%lld %lld 1\n",
+-	   (long long) (be_su ? 0 : original_uid), (long long) original_uid);
+-  write (UMAP, tmp, strlen (tmp));
+-  xclose (UMAP);
+-
+-  /* We must disable setgroups () before we can map our groups, else we
+-     get EPERM.  */
+-  GMAP = open ("/proc/self/setgroups", O_WRONLY);
+-  if (GMAP >= 0)
++  if (require_pidns)
+     {
+-      /* We support kernels old enough to not have this.  */
+-      write (GMAP, "deny\n", 5);
+-      xclose (GMAP);
++      /* Now that we're pid 1 (effectively "root") we can mount /proc  */
++      maybe_xmkdir ("/proc", 0777);
++      if (mount ("proc", "/proc", "proc", 0, NULL) != 0)
++	{
++	  /* This happens if we're trying to create a nested container,
++	     like if the build is running under podman, and we lack
++	     privileges.
++
++	     Ideally we would WARN here, but that would just add noise to
++	     *every* test-container test, and the ones that care should
++	     have their own relevant diagnostics.
++
++	     FAIL_EXIT1 ("Unable to mount /proc: ");  */
++	}
++      else
++	do_proc_mounts = 1;
+     }
+ 
+-  /* We map our original GID to the same GID in the container so we
+-     can own our own files normally.  */
+-  GMAP = open ("/proc/self/gid_map", O_WRONLY);
+-  if (GMAP < 0)
+-    FAIL_EXIT1 ("can't write to /proc/self/gid_map\n");
++  if (do_proc_mounts)
++    {
++      /* We map our original UID to the same UID in the container so we
++	 can own our own files normally.  */
++      UMAP = open ("/proc/self/uid_map", O_WRONLY);
++      if (UMAP < 0)
++	FAIL_EXIT1 ("can't write to /proc/self/uid_map\n");
++
++      sprintf (tmp, "%lld %lld 1\n",
++	       (long long) (be_su ? 0 : original_uid), (long long) original_uid);
++      write (UMAP, tmp, strlen (tmp));
++      xclose (UMAP);
++
++      /* We must disable setgroups () before we can map our groups, else we
++	 get EPERM.  */
++      GMAP = open ("/proc/self/setgroups", O_WRONLY);
++      if (GMAP >= 0)
++	{
++	  /* We support kernels old enough to not have this.  */
++	  write (GMAP, "deny\n", 5);
++	  xclose (GMAP);
++	}
+ 
+-  sprintf (tmp, "%lld %lld 1\n",
+-	   (long long) (be_su ? 0 : original_gid), (long long) original_gid);
+-  write (GMAP, tmp, strlen (tmp));
+-  xclose (GMAP);
++      /* We map our original GID to the same GID in the container so we
++	 can own our own files normally.  */
++      GMAP = open ("/proc/self/gid_map", O_WRONLY);
++      if (GMAP < 0)
++	FAIL_EXIT1 ("can't write to /proc/self/gid_map\n");
++
++      sprintf (tmp, "%lld %lld 1\n",
++	       (long long) (be_su ? 0 : original_gid), (long long) original_gid);
++      write (GMAP, tmp, strlen (tmp));
++      xclose (GMAP);
++    }
+ 
+   if (change_cwd)
+     {
diff --git a/SOURCES/glibc-rh2142937-2.patch b/SOURCES/glibc-rh2142937-2.patch
new file mode 100644
index 0000000..e9102b6
--- /dev/null
+++ b/SOURCES/glibc-rh2142937-2.patch
@@ -0,0 +1,24 @@
+commit b2cd93fce666fdc8c9a5c64af2741a8a6940ac99
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Fri Mar 25 11:16:49 2022 -0300
+
+    elf: Fix wrong fscanf usage on tst-pldd
+    
+    To take the extra '\0' into consideration.
+    
+    Checked on x86_64-linux-gnu.
+
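+(A minimal standalone sketch, not part of the upstream commit: a scanf
+field width of N stores at most N characters and then a terminating
+'\0', so "%511s" is the widest conversion that safely fills the
+512-byte buffer used by the test.)
+
+  #include <stdio.h>
+
+  int
+  main (void)
+  {
+    char buffer[512];
+    /* "%511s" reads at most 511 characters; the remaining byte holds
+       the terminating null.  */
+    if (sscanf ("123: /path/to/tst-pldd", "%*u: %511s", buffer) == 1)
+      puts (buffer);
+    return 0;
+  }
+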
+diff --git a/elf/tst-pldd.c b/elf/tst-pldd.c
+index 45ac033a0f897088..ab89798e250fdccc 100644
+--- a/elf/tst-pldd.c
++++ b/elf/tst-pldd.c
+@@ -115,7 +115,8 @@ do_test (void)
+     TEST_VERIFY (out != NULL);
+ 
+     /* First line is in the form of <pid>: <full path of executable>  */
+-    TEST_COMPARE (fscanf (out, "%u: " STRINPUT (512), &pid, buffer), 2);
++    TEST_COMPARE (fscanf (out, "%u: " STRINPUT (sizeof (buffer) - 1), &pid,
++			  buffer), 2);
+ 
+     TEST_COMPARE (pid, *target_pid_ptr);
+     TEST_COMPARE (strcmp (basename (buffer), "tst-pldd"), 0);
diff --git a/SOURCES/glibc-rh2142937-3.patch b/SOURCES/glibc-rh2142937-3.patch
new file mode 100644
index 0000000..a61dfde
--- /dev/null
+++ b/SOURCES/glibc-rh2142937-3.patch
@@ -0,0 +1,37 @@
+commit c353689e49e72f3aafa1a9e68d4f7a4f33a79cbe
+Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date:   Tue Jul 5 12:58:40 2022 -0300
+
+    elf: Fix wrong fscanf usage on tst-pldd
+    
+    The fix done in b2cd93fce666fdc8c9a5c64af2741a8a6940ac99 does not really
+    work since macro stringification does not expand the sizeof nor the
+    arithmetic operation.
+    
+    Checked on x86_64-linux-gnu.
+
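+(A minimal standalone sketch, not part of the upstream commit, of the
+preprocessor behaviour described above: the # operator stringizes its
+argument textually, so it never evaluates sizeof or arithmetic, and it
+only picks up a macro's numeric value when an extra expansion level is
+added, which is what the XSTRINPUT indirection below provides.)
+
+  #include <stdio.h>
+
+  #define STR_DIRECT(x)   #x              /* stringize without expanding X */
+  #define STR_EXPANDED(x) STR_DIRECT (x)  /* expand X first, then stringize */
+  #define BUFFERLEN 511
+
+  int
+  main (void)
+  {
+    puts (STR_DIRECT (sizeof (buffer) - 1));  /* prints: sizeof (buffer) - 1 */
+    puts (STR_DIRECT (BUFFERLEN));            /* prints: BUFFERLEN */
+    puts (STR_EXPANDED (BUFFERLEN));          /* prints: 511 */
+    return 0;
+  }
+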
+diff --git a/elf/tst-pldd.c b/elf/tst-pldd.c
+index ab89798e250fdccc..52c0a75be5a808d1 100644
+--- a/elf/tst-pldd.c
++++ b/elf/tst-pldd.c
+@@ -108,15 +108,16 @@ do_test (void)
+      loader and libc.  */
+   {
+     pid_t pid;
+-    char buffer[512];
+-#define STRINPUT(size) "%" # size "s"
++#define BUFFERLEN 511
++    char buffer[BUFFERLEN + 1];
++#define STRINPUT(size)  XSTRINPUT(size)
++#define XSTRINPUT(size) "%" # size "s"
+ 
+     FILE *out = fmemopen (pldd.out.buffer, pldd.out.length, "r");
+     TEST_VERIFY (out != NULL);
+ 
+     /* First line is in the form of <pid>: <full path of executable>  */
+-    TEST_COMPARE (fscanf (out, "%u: " STRINPUT (sizeof (buffer) - 1), &pid,
+-			  buffer), 2);
++    TEST_COMPARE (fscanf (out, "%u: " STRINPUT (BUFFERLEN), &pid, buffer), 2);
+ 
+     TEST_COMPARE (pid, *target_pid_ptr);
+     TEST_COMPARE (strcmp (basename (buffer), "tst-pldd"), 0);
diff --git a/SOURCES/glibc-rh2144568.patch b/SOURCES/glibc-rh2144568.patch
new file mode 100644
index 0000000..82f86ad
--- /dev/null
+++ b/SOURCES/glibc-rh2144568.patch
@@ -0,0 +1,45 @@
+commit eb4181e9f4a512de37dad4ba623c921671584dea
+Author: Vladislav Khmelevsky <och95@yandex.ru>
+Date:   Thu Nov 17 12:47:29 2022 +0400
+
+    elf: Fix rtld-audit trampoline for aarch64
+    
+    This patch fixes two problems with audit:
+    
+      1. The DL_OFFSET_RV_VPCS offset was mixed up with DL_OFFSET_RG_VPCS,
+         resulting in the x2 register value being nulled in the RG structure.
+    
+      2. We need to preserve the x8 register before the function call, but
+         don't have to save its new value and restore it before return.
+    
+    Anyway, the final restore was using the OFFSET_RV instead of the OFFSET_RG
+    value, which is wrong (although it doesn't affect anything).
+    
+    Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+diff --git a/sysdeps/aarch64/dl-trampoline.S b/sysdeps/aarch64/dl-trampoline.S
+index a83e7fc5f97047e2..b4b9c86224785a2c 100644
+--- a/sysdeps/aarch64/dl-trampoline.S
++++ b/sysdeps/aarch64/dl-trampoline.S
+@@ -282,12 +282,11 @@ _dl_runtime_profile:
+ 	stp	x2, x3, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*1]
+ 	stp	x4, x5, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*2]
+ 	stp	x6, x7, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*3]
+-	str	x8,     [x29, #OFFSET_RG + DL_OFFSET_RG_X0 + 16*4]
+ 	stp	q0, q1, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*0]
+ 	stp	q2, q3, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*1]
+ 	stp	q4, q5, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*2]
+ 	stp	q6, q7, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*3]
+-	str	xzr,    [X29, #OFFSET_RV + DL_OFFSET_RG_VPCS]
++	str	xzr,    [X29, #OFFSET_RV + DL_OFFSET_RV_VPCS]
+ 
+ 	/* Setup call to pltexit  */
+ 	ldp	x0, x1, [x29, #OFFSET_SAVED_CALL_X0]
+@@ -299,7 +298,6 @@ _dl_runtime_profile:
+ 	ldp	x2, x3, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*1]
+ 	ldp	x4, x5, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*2]
+ 	ldp	x6, x7, [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*3]
+-	ldr	x8,     [x29, #OFFSET_RV + DL_OFFSET_RV_X0 + 16*4]
+ 	ldp	q0, q1, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*0]
+ 	ldp	q2, q3, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*1]
+ 	ldp	q4, q5, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*2]
diff --git a/SOURCES/glibc-rh2154914-1.patch b/SOURCES/glibc-rh2154914-1.patch
new file mode 100644
index 0000000..3eafe16
--- /dev/null
+++ b/SOURCES/glibc-rh2154914-1.patch
@@ -0,0 +1,297 @@
+Maintain an explicit order of tunables, so that the tunable_list
+array and the tunable_id_t constant can remain consistent over time.
+
+Related to this upstream bug:
+
+  Internal tunables ABI depends on awk array iteration order
+  <https://sourceware.org/bugzilla/show_bug.cgi?id=30027>
+
+The new dl-tunables.list files are already on the sysdeps search
+path, which is why the existing Makeconfig rule picks them up.
+The files for RHEL 8.7.z were created by applying the gen-tunables.awk
+part of this patch to RHEL 8.7.0 (glibc-2.28-211.el8_7, to be precise).
+The sysdeps/unix/sysv/linux/**/dl-tunables.list files were created
+based on the generated error message during the RHEL 8.7.z build.
+Afterwards, the glibc.rtld.dynamic_sort tunable was added at the
+end of the files, for the RHEL 8.8.0 build.
+
+Going forward, new tunables will have to be added manually to the end
+of those files.  Existing tunables should not be deleted.  For
+deletion, the script would have to be extended to be able to create
+gaps in the tunable_list array.
+
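+(A minimal standalone sketch, not part of this patch, of why the
+emission order is an internal ABI: the generated enumerators are used
+as indices into the parallel tunable_list array, so a rebuild that
+happens to emit them in a different order changes the numeric IDs that
+other components were compiled against.  The identifiers below are
+simplified stand-ins for the generated enumerator names.)
+
+  #include <stdio.h>
+
+  /* Simplified stand-ins for the generated tunable_id_t enumerators.  */
+  typedef enum
+  {
+    TUNABLE_glibc_malloc_check,    /* 0 in this hypothetical build */
+    TUNABLE_glibc_malloc_top_pad,  /* 1 */
+    TUNABLE_count
+  } tunable_id_t;
+
+  /* Parallel array; gen-tunables.awk emits the real tunable_list in
+     the same order as the enum above.  */
+  static const char *const tunable_names[TUNABLE_count] =
+  {
+    [TUNABLE_glibc_malloc_check] = "glibc.malloc.check",
+    [TUNABLE_glibc_malloc_top_pad] = "glibc.malloc.top_pad",
+  };
+
+  int
+  main (void)
+  {
+    /* Code compiled against a header in which the two enumerators were
+       emitted in the opposite order would index the wrong slot here.  */
+    puts (tunable_names[TUNABLE_glibc_malloc_top_pad]);
+    return 0;
+  }
+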
+diff --git a/scripts/gen-tunables.awk b/scripts/gen-tunables.awk
+index 622199061a140ccd..8ebfb560976ead41 100644
+--- a/scripts/gen-tunables.awk
++++ b/scripts/gen-tunables.awk
+@@ -12,6 +12,7 @@ BEGIN {
+   tunable=""
+   ns=""
+   top_ns=""
++  tunable_order_count = 0
+ }
+ 
+ # Skip over blank lines and comments.
+@@ -78,6 +79,37 @@ $1 == "}" {
+   next
+ }
+ 
++$1 == "@order" {
++    if (top_ns != "") {
++	printf("%s:%d: error: invalid @order directive inside namespace %s\n",
++               FILENAME, FNR, top_ns) > "/dev/stderr"
++	exit 1
++    }
++    if (NF != 2) {
++	printf("%s:%d: error: invalid argument count in @order directive\n",
++               FILENAME, FNR) > "/dev/stderr"
++        exit 1
++    }
++    order_arg = $2
++    if (split(order_arg, indices, /\./) != 3) {
++	printf("%s:%d: error: invalid tunable syntax in @order directive\n",
++               FILENAME, FNR) > "/dev/stderr"
++        exit 1
++    }
++    t = indices[1]
++    n = indices[2]
++    m = indices[3]
++    if ((t, n, m) in tunable_order) {
++	printf("%s:%d: error: duplicate \"@order %s\"\n", \
++               FILENAME, FNR, order_arg) > "/dev/stderr"
++        exit 1
++    }
++    ++tunable_order_count
++    tunable_order[t,n,m] = tunable_order_count
++    tunable_order_list[tunable_order_count] = t SUBSEP n SUBSEP m
++    next
++}
++
+ # Everything else, which could either be a tunable without any attributes or a
+ # tunable attribute.
+ {
+@@ -137,6 +169,31 @@ END {
+     exit 1
+   }
+ 
++  missing_order = 0
++  for (tnm in types) {
++      if (!(tnm in tunable_order)) {
++	  if (!missing_order) {
++	      print "error: Missing @order directives:" > "/dev/stderr"
++	      missing_order = 1
++	  }
++	  split(tnm, indices, SUBSEP)
++	  printf("@order %s.%s.%s\n", indices[1], indices[2], indices[3]) \
++	      > "/dev/stderr"
++      }
++  }
++  for (i = 1; i <= tunable_order_count; ++i) {
++    tnm = tunable_order_list[i]
++    if (!(tnm in types)) {
++	split(tnm, indices, SUBSEP)
++	printf("error: tunable in \"@order %s.%s.%s\" not known\n", \
++	       indices[1], indices[2], indices[3]) > "/dev/stderr"
++	missing_order = 1
++    }
++  }
++  if (missing_order) {
++      exit 1
++  }
++
+   print "/* AUTOGENERATED by gen-tunables.awk.  */"
+   print "#ifndef _TUNABLES_H_"
+   print "# error \"Do not include this file directly.\""
+@@ -147,7 +204,8 @@ END {
+   # Now, the enum names
+   print "\ntypedef enum"
+   print "{"
+-  for (tnm in types) {
++  for (i = 1; i <= tunable_order_count; ++i) {
++    tnm = tunable_order_list[i]
+     split (tnm, indices, SUBSEP);
+     t = indices[1];
+     n = indices[2];
+@@ -159,7 +217,8 @@ END {
+   # Finally, the tunable list.
+   print "\n#ifdef TUNABLES_INTERNAL"
+   print "static tunable_t tunable_list[] attribute_relro = {"
+-  for (tnm in types) {
++  for (i = 1; i <= tunable_order_count; ++i) {
++    tnm = tunable_order_list[i]
+     split (tnm, indices, SUBSEP);
+     t = indices[1];
+     n = indices[2];
+diff --git a/sysdeps/unix/sysv/linux/aarch64/dl-tunables.list b/sysdeps/unix/sysv/linux/aarch64/dl-tunables.list
+new file mode 100644
+index 0000000000000000..5c3c5292025607a1
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/aarch64/dl-tunables.list
+@@ -0,0 +1,26 @@
++# Order of tunables in RHEL 8.7.0.
++@order glibc.rtld.nns
++@order glibc.elision.skip_lock_after_retries
++@order glibc.malloc.trim_threshold
++@order glibc.malloc.perturb
++@order glibc.cpu.name
++@order glibc.elision.tries
++@order glibc.elision.enable
++@order glibc.malloc.mxfast
++@order glibc.elision.skip_lock_busy
++@order glibc.malloc.top_pad
++@order glibc.cpu.hwcap_mask
++@order glibc.malloc.mmap_max
++@order glibc.elision.skip_trylock_internal_abort
++@order glibc.malloc.tcache_unsorted_limit
++@order glibc.elision.skip_lock_internal_abort
++@order glibc.malloc.arena_max
++@order glibc.malloc.mmap_threshold
++@order glibc.malloc.tcache_count
++@order glibc.malloc.arena_test
++@order glibc.rtld.optional_static_tls
++@order glibc.malloc.tcache_max
++@order glibc.malloc.check
++
++# Tunables added in RHEL 8.8.0
++@order glibc.rtld.dynamic_sort
+diff --git a/sysdeps/unix/sysv/linux/i386/dl-tunables.list b/sysdeps/unix/sysv/linux/i386/dl-tunables.list
+new file mode 100644
+index 0000000000000000..b9cad4af62d9f2e5
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/i386/dl-tunables.list
+@@ -0,0 +1,33 @@
++# Order of tunables in RHEL 8.7.0.
++@order glibc.rtld.nns
++@order glibc.elision.skip_lock_after_retries
++@order glibc.malloc.trim_threshold
++@order glibc.malloc.perturb
++@order glibc.cpu.x86_shared_cache_size
++@order glibc.elision.tries
++@order glibc.elision.enable
++@order glibc.cpu.x86_rep_movsb_threshold
++@order glibc.malloc.mxfast
++@order glibc.elision.skip_lock_busy
++@order glibc.malloc.top_pad
++@order glibc.cpu.x86_rep_stosb_threshold
++@order glibc.cpu.x86_non_temporal_threshold
++@order glibc.cpu.x86_shstk
++@order glibc.cpu.hwcap_mask
++@order glibc.malloc.mmap_max
++@order glibc.elision.skip_trylock_internal_abort
++@order glibc.malloc.tcache_unsorted_limit
++@order glibc.cpu.x86_ibt
++@order glibc.cpu.hwcaps
++@order glibc.elision.skip_lock_internal_abort
++@order glibc.malloc.arena_max
++@order glibc.malloc.mmap_threshold
++@order glibc.cpu.x86_data_cache_size
++@order glibc.malloc.tcache_count
++@order glibc.malloc.arena_test
++@order glibc.rtld.optional_static_tls
++@order glibc.malloc.tcache_max
++@order glibc.malloc.check
++
++# Tunables added in RHEL 8.8.0
++@order glibc.rtld.dynamic_sort
+diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/le/dl-tunables.list b/sysdeps/unix/sysv/linux/powerpc/powerpc64/le/dl-tunables.list
+new file mode 100644
+index 0000000000000000..ee1e6fca95e1f2da
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/le/dl-tunables.list
+@@ -0,0 +1,26 @@
++# Order of tunables in RHEL 8.7.0.
++@order glibc.rtld.nns
++@order glibc.elision.skip_lock_after_retries
++@order glibc.malloc.trim_threshold
++@order glibc.malloc.perturb
++@order glibc.elision.tries
++@order glibc.elision.enable
++@order glibc.malloc.mxfast
++@order glibc.elision.skip_lock_busy
++@order glibc.malloc.top_pad
++@order glibc.cpu.hwcap_mask
++@order glibc.malloc.mmap_max
++@order glibc.elision.skip_trylock_internal_abort
++@order glibc.malloc.tcache_unsorted_limit
++@order glibc.elision.skip_lock_internal_abort
++@order glibc.malloc.arena_max
++@order glibc.malloc.mmap_threshold
++@order glibc.cpu.cached_memopt
++@order glibc.malloc.tcache_count
++@order glibc.malloc.arena_test
++@order glibc.rtld.optional_static_tls
++@order glibc.malloc.tcache_max
++@order glibc.malloc.check
++
++# Tunables added in RHEL 8.8.0
++@order glibc.rtld.dynamic_sort
+diff --git a/sysdeps/unix/sysv/linux/s390/s390-64/dl-tunables.list b/sysdeps/unix/sysv/linux/s390/s390-64/dl-tunables.list
+new file mode 100644
+index 0000000000000000..099e28d8f8e67944
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/s390/s390-64/dl-tunables.list
+@@ -0,0 +1,25 @@
++# Order of tunables in RHEL 8.7.0.
++@order glibc.rtld.nns
++@order glibc.elision.skip_lock_after_retries
++@order glibc.malloc.trim_threshold
++@order glibc.malloc.perturb
++@order glibc.elision.tries
++@order glibc.elision.enable
++@order glibc.malloc.mxfast
++@order glibc.elision.skip_lock_busy
++@order glibc.malloc.top_pad
++@order glibc.cpu.hwcap_mask
++@order glibc.malloc.mmap_max
++@order glibc.elision.skip_trylock_internal_abort
++@order glibc.malloc.tcache_unsorted_limit
++@order glibc.elision.skip_lock_internal_abort
++@order glibc.malloc.arena_max
++@order glibc.malloc.mmap_threshold
++@order glibc.malloc.tcache_count
++@order glibc.malloc.arena_test
++@order glibc.rtld.optional_static_tls
++@order glibc.malloc.tcache_max
++@order glibc.malloc.check
++
++# Tunables added in RHEL 8.8.0
++@order glibc.rtld.dynamic_sort
+diff --git a/sysdeps/unix/sysv/linux/x86_64/64/dl-tunables.list b/sysdeps/unix/sysv/linux/x86_64/64/dl-tunables.list
+new file mode 100644
+index 0000000000000000..b9cad4af62d9f2e5
+--- /dev/null
++++ b/sysdeps/unix/sysv/linux/x86_64/64/dl-tunables.list
+@@ -0,0 +1,33 @@
++# Order of tunables in RHEL 8.7.0.
++@order glibc.rtld.nns
++@order glibc.elision.skip_lock_after_retries
++@order glibc.malloc.trim_threshold
++@order glibc.malloc.perturb
++@order glibc.cpu.x86_shared_cache_size
++@order glibc.elision.tries
++@order glibc.elision.enable
++@order glibc.cpu.x86_rep_movsb_threshold
++@order glibc.malloc.mxfast
++@order glibc.elision.skip_lock_busy
++@order glibc.malloc.top_pad
++@order glibc.cpu.x86_rep_stosb_threshold
++@order glibc.cpu.x86_non_temporal_threshold
++@order glibc.cpu.x86_shstk
++@order glibc.cpu.hwcap_mask
++@order glibc.malloc.mmap_max
++@order glibc.elision.skip_trylock_internal_abort
++@order glibc.malloc.tcache_unsorted_limit
++@order glibc.cpu.x86_ibt
++@order glibc.cpu.hwcaps
++@order glibc.elision.skip_lock_internal_abort
++@order glibc.malloc.arena_max
++@order glibc.malloc.mmap_threshold
++@order glibc.cpu.x86_data_cache_size
++@order glibc.malloc.tcache_count
++@order glibc.malloc.arena_test
++@order glibc.rtld.optional_static_tls
++@order glibc.malloc.tcache_max
++@order glibc.malloc.check
++
++# Tunables added in RHEL 8.8.0
++@order glibc.rtld.dynamic_sort
diff --git a/SOURCES/glibc-rh2154914-2.patch b/SOURCES/glibc-rh2154914-2.patch
new file mode 100644
index 0000000..4c89744
--- /dev/null
+++ b/SOURCES/glibc-rh2154914-2.patch
@@ -0,0 +1,81 @@
+Move _dl_dso_sort_algo out of _rtld_global_ro.  It is only used
+locally in elf/dl-sort-maps.c.  This avoids changing the internal
+_rtld_global_ro ABI.
+
+diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
+index 6f5c17b47b98fbc7..aeb79b40b45054c0 100644
+--- a/elf/dl-sort-maps.c
++++ b/elf/dl-sort-maps.c
+@@ -290,12 +290,21 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
+     }
+ }
+ 
++/* DSO sort algorithm to use.  */
++enum dso_sort_algorithm
++  {
++    dso_sort_algorithm_original,
++    dso_sort_algorithm_dfs
++  };
++
++static enum dso_sort_algorithm _dl_dso_sort_algo;
++
+ void
+ _dl_sort_maps_init (void)
+ {
+   int32_t algorithm = TUNABLE_GET (glibc, rtld, dynamic_sort, int32_t, NULL);
+-  GLRO(dl_dso_sort_algo) = algorithm == 1 ? dso_sort_algorithm_original
+-					  : dso_sort_algorithm_dfs;
++  _dl_dso_sort_algo = (algorithm == 1 ? dso_sort_algorithm_original
++		       : dso_sort_algorithm_dfs);
+ }
+ 
+ void
+@@ -309,7 +318,7 @@ _dl_sort_maps (struct link_map **maps, unsigned int nmaps,
+      PTR_MANGLE/DEMANGLE, further impairing performance of small, common
+      input cases. A simple if-case with direct function calls appears to
+      be the fastest.  */
+-  if (__glibc_likely (GLRO(dl_dso_sort_algo) == dso_sort_algorithm_original))
++  if (__glibc_likely (_dl_dso_sort_algo == dso_sort_algorithm_original))
+     _dl_sort_maps_original (maps, nmaps, force_first, for_fini);
+   else
+     _dl_sort_maps_dfs (maps, nmaps, force_first, for_fini);
+diff --git a/elf/dl-support.c b/elf/dl-support.c
+index ae03aec9764e29d3..e9943e889ef447ad 100644
+--- a/elf/dl-support.c
++++ b/elf/dl-support.c
+@@ -155,8 +155,6 @@ size_t _dl_phnum;
+ uint64_t _dl_hwcap __attribute__ ((nocommon));
+ uint64_t _dl_hwcap2 __attribute__ ((nocommon));
+ 
+-enum dso_sort_algorithm _dl_dso_sort_algo;
+-
+ /* The value of the FPU control word the kernel will preset in hardware.  */
+ fpu_control_t _dl_fpu_control = _FPU_DEFAULT;
+ 
+diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
+index 2c1b4c47c6a6c643..29bbde3e83e37d7e 100644
+--- a/sysdeps/generic/ldsodefs.h
++++ b/sysdeps/generic/ldsodefs.h
+@@ -240,13 +240,6 @@ enum allowmask
+   };
+ 
+ 
+-/* DSO sort algorithm to use (check dl-sort-maps.c).  */
+-enum dso_sort_algorithm
+-  {
+-    dso_sort_algorithm_original,
+-    dso_sort_algorithm_dfs
+-  };
+-
+ struct audit_ifaces
+ {
+   void (*activity) (uintptr_t *, unsigned int);
+@@ -640,8 +633,6 @@ struct rtld_global_ro
+      platforms.  */
+   EXTERN uint64_t _dl_hwcap2;
+ 
+-  EXTERN enum dso_sort_algorithm _dl_dso_sort_algo;
+-
+ #ifdef SHARED
+   /* We add a function table to _rtld_global which is then used to
+      call the function instead of going through the PLT.  The result
diff --git a/SOURCES/wrap-find-debuginfo.sh b/SOURCES/wrap-find-debuginfo.sh
index 6eeb802..8479217 100755
--- a/SOURCES/wrap-find-debuginfo.sh
+++ b/SOURCES/wrap-find-debuginfo.sh
@@ -17,6 +17,8 @@
 set -evx
 
 tar_tmp="$(mktemp)"
+declare -A libc_dlink_tmp_list
+ldso_annobin_sym_tmp_list=""
 
 # Prefer a separately installed debugedit over the RPM-integrated one.
 if command -v debugedit >/dev/null ; then
@@ -26,7 +28,7 @@ else
 fi
 
 cleanup () {
-    rm -f "$tar_tmp"
+    rm -f "$tar_tmp" ${libc_dlink_tmp_list[@]} $ldso_annobin_sym_tmp_list
 }
 trap cleanup 0
 
@@ -51,6 +53,15 @@ full_list="$ldso_list $libc_list $libdl_list $libpthread_list $librt_list"
 # Run the debuginfo extraction.
 "$script_path" "$@"
 
+# libc.so.6: Extract the .gnu_debuglink section
+for f in $libc_list
+do
+  dlink_tmp="$(mktemp)"
+  libc_dlink_tmp_list["$f"]="$dlink_tmp"
+  objcopy -j.gnu_debuglink --set-section-flags .gnu_debuglink=alloc \
+      -O binary "$sysroot_path/$f" "$dlink_tmp"
+done
+
 # Restore the original files.
 (cd "$sysroot_path"; tar xf "$tar_tmp")
 (cd "$sysroot_path"; ls -l $full_list)
@@ -61,6 +72,20 @@ do
     objcopy --merge-notes "$sysroot_path/$p"
 done
 
+# libc.so.6: Restore the .gnu_debuglink section
+for f in ${!libc_dlink_tmp_list[@]}
+do
+  dlink_tmp="${libc_dlink_tmp_list[$f]}"
+  objcopy --add-section .gnu_debuglink="$dlink_tmp" "$sysroot_path/$f"
+done
+
+# ld.so does not have separate debuginfo, so the debuginfo file
+# generated by find-debuginfo is redundant.  Therefore, remove it.
+for ldso_debug in `find "$sysroot_path" -name 'ld-*.so*.debug' -type f`
+do
+  rm -f "$ldso_debug"
+done
+
 # libc.so.6 and other shared objects: Reduce to valuable symbols.
 # Eliminate file symbols, annobin symbols, and symbols used by the
 # glibc build to implement hidden aliases (__EI_*).  We would also
@@ -103,6 +128,14 @@ debug_base_name=${last_arg:-$RPM_BUILD_ROOT}
 for p in $ldso_list
 do
     $debugedit -b "$debug_base_name" -d "$debug_dest_name" -n "$sysroot_path/$p"
+
+    # Remove the .annobin* symbols (and only them).
+    ldso_annobin_sym_tmp="$(mktemp)"
+    ldso_annobin_sym_tmp_list+=" $ldso_annobin_sym_tmp"
+    if nm --format=posix "$sysroot_path/$p" | cut -d' ' -f1 \
+        | grep '^\.annobin' > "$ldso_annobin_sym_tmp"; then
+        objcopy --strip-symbols="$ldso_annobin_sym_tmp" "$sysroot_path/$p"
+    fi
 done
 
 # Apply single-file DWARF optimization.
diff --git a/SPECS/glibc.spec b/SPECS/glibc.spec
index 848f4d1..c1e7d19 100644
--- a/SPECS/glibc.spec
+++ b/SPECS/glibc.spec
@@ -1,6 +1,6 @@
 %define glibcsrcdir glibc-2.28
 %define glibcversion 2.28
-%define glibcrelease 211%{?dist}
+%define glibcrelease 225%{?dist}
 # Pre-release tarballs are pulled in from git using a command that is
 # effectively:
 #
@@ -968,6 +968,69 @@ Patch775: glibc-rh2104907.patch
 Patch776: glibc-rh2119304-1.patch
 Patch777: glibc-rh2119304-2.patch
 Patch778: glibc-rh2119304-3.patch
+Patch779: glibc-rh2118667.patch
+Patch780: glibc-rh2122498.patch
+Patch781: glibc-rh2125222.patch
+Patch782: glibc-rh1871383-1.patch
+Patch783: glibc-rh1871383-2.patch
+Patch784: glibc-rh1871383-3.patch
+Patch785: glibc-rh1871383-4.patch
+Patch786: glibc-rh1871383-5.patch
+Patch787: glibc-rh1871383-6.patch
+Patch788: glibc-rh1871383-7.patch
+Patch789: glibc-rh2122501-1.patch
+Patch790: glibc-rh2122501-2.patch
+Patch791: glibc-rh2122501-3.patch
+Patch792: glibc-rh2122501-4.patch
+Patch793: glibc-rh2122501-5.patch
+Patch794: glibc-rh2121746-1.patch
+Patch795: glibc-rh2121746-2.patch
+Patch796: glibc-rh2116938.patch
+Patch797: glibc-rh2109510-1.patch
+Patch798: glibc-rh2109510-2.patch
+Patch799: glibc-rh2109510-3.patch
+Patch800: glibc-rh2109510-4.patch
+Patch801: glibc-rh2109510-5.patch
+Patch802: glibc-rh2109510-6.patch
+Patch803: glibc-rh2109510-7.patch
+Patch804: glibc-rh2109510-8.patch
+Patch805: glibc-rh2109510-9.patch
+Patch806: glibc-rh2109510-10.patch
+Patch807: glibc-rh2109510-11.patch
+Patch808: glibc-rh2109510-12.patch
+Patch809: glibc-rh2109510-13.patch
+Patch810: glibc-rh2109510-14.patch
+Patch811: glibc-rh2109510-15.patch
+Patch812: glibc-rh2109510-16.patch
+Patch813: glibc-rh2109510-17.patch
+Patch814: glibc-rh2109510-18.patch
+Patch815: glibc-rh2109510-19.patch
+Patch816: glibc-rh2109510-20.patch
+Patch817: glibc-rh2109510-21.patch
+Patch818: glibc-rh2109510-22.patch
+Patch819: glibc-rh2109510-23.patch
+Patch820: glibc-rh2139875-1.patch
+Patch821: glibc-rh2139875-2.patch
+Patch822: glibc-rh2139875-3.patch
+Patch823: glibc-rh1159809-1.patch
+Patch824: glibc-rh1159809-2.patch
+Patch825: glibc-rh1159809-3.patch
+Patch826: glibc-rh1159809-4.patch
+Patch827: glibc-rh1159809-5.patch
+Patch828: glibc-rh1159809-6.patch
+Patch829: glibc-rh1159809-7.patch
+Patch830: glibc-rh1159809-8.patch
+Patch831: glibc-rh1159809-9.patch
+Patch832: glibc-rh1159809-10.patch
+Patch833: glibc-rh1159809-11.patch
+Patch834: glibc-rh1159809-12.patch
+Patch835: glibc-rh2141989.patch
+Patch836: glibc-rh2142937-1.patch
+Patch837: glibc-rh2142937-2.patch
+Patch838: glibc-rh2142937-3.patch
+Patch839: glibc-rh2144568.patch
+Patch840: glibc-rh2154914-1.patch
+Patch841: glibc-rh2154914-2.patch
 
 ##############################################################################
 # Continued list of core "glibc" package information:
@@ -2798,6 +2861,52 @@ fi
 %files -f compat-libpthread-nonshared.filelist -n compat-libpthread-nonshared
 
 %changelog
+* Fri Jan 20 2023 Florian Weimer <fweimer@redhat.com> - 2.28-225
+- Enforce a specific internal ordering for tunables (#2154914)
+
+* Wed Nov 30 2022 Arjun Shankar <arjun@redhat.com> - 2.28-224
+- Fix rtld-audit trampoline for aarch64 (#2144568)
+
+* Fri Nov 25 2022 Arjun Shankar <arjun@redhat.com> - 2.28-223
+- Backport upstream fixes to tst-pldd (#2142937)
+
+* Tue Nov 22 2022 Florian Weimer <fweimer@redhat.com> - 2.28-222
+- Restore IPC_64 support in sysvipc *ctl functions (#2141989)
+
+* Fri Nov 18 2022 Florian Weimer <fweimer@redhat.com> - 2.28-221
+- Switch to fast DSO dependency sorting algorithm (#1159809)
+
+* Thu Nov  3 2022 Florian Weimer <fweimer@redhat.com> - 2.28-220
+- Explicitly switch to --with-default-link=no (#2109510)
+- Define MAP_SYNC on ppc64le (#2139875)
+
+* Mon Oct 24 2022 Arjun Shankar <arjun@redhat.com> - 2.28-219
+- Fix -Wstrict-overflow warning when using CMSG_NXTHDR macro (#2116938)
+
+* Fri Oct 14 2022 DJ Delorie <dj@redhat.com> - 2.28-218
+- Fix dlmopen/dlclose/dlmopen sequence and libc initialization (#2121746)
+
+* Thu Oct 13 2022 Arjun Shankar <arjun@redhat.com> - 2.28-217
+- Fix memory corruption in printf with thousands separators and large
+  integer width (#2122501)
+
+* Wed Oct 05 2022 Arjun Shankar <arjun@redhat.com> - 2.28-216
+- Retain .gnu_debuglink section for libc.so.6 (#2115830)
+- Remove .annobin* symbols from ld.so
+- Remove redundant ld.so debuginfo file
+
+* Wed Sep 28 2022 DJ Delorie <dj@redhat.com> - 2.28-215
+- Improve malloc implementation (#1871383)
+
+* Tue Sep 20 2022 Florian Weimer <fweimer@redhat.com> - 2.28-214
+- Fix hwcaps search path size computation (#2125222)
+
+* Tue Sep 20 2022 Florian Weimer <fweimer@redhat.com> - 2.28-213
+- Fix nscd netlink cache invalidation if epoll is used (#2122498)
+
+* Tue Sep 20 2022 Florian Weimer <fweimer@redhat.com> - 2.28-212
+- Run tst-audit-tlsdesc, tst-audit-tlsdesc-dlopen everywhere (#2118667)
+
 * Thu Aug 25 2022 Florian Weimer <fweimer@redhat.com> - 2.28-211
 - Preserve GLRO (dl_naudit) internal ABI (#2119304)
 - Avoid s390x ABI change due to z16 recognition on s390x (#2119304)
-- 
GitLab