diff --git a/.glibc.checksum b/.glibc.checksum index 1882d6a12f4c8d728e7e3024e7441b6a59f07a36..1353042f5eab068b248251e95015a88122ee2c88 100644 --- a/.glibc.checksum +++ b/.glibc.checksum @@ -1 +1 @@ -a80b272db684a3386983a445e07d1b36c3e6fc723eb93ecf89cae7d08970fd83 +cdfeb758d583631ede7bac4d5ab0c6554febaa4dd5aa66b9bdddfccd4aa8d803 diff --git a/SOURCES/glibc-RHEL-69003.patch b/SOURCES/glibc-RHEL-69003.patch new file mode 100644 index 0000000000000000000000000000000000000000..a0e38e7e33a90774c6095c0d95d0692521bf1fc5 --- /dev/null +++ b/SOURCES/glibc-RHEL-69003.patch @@ -0,0 +1,845 @@ +From e1d3312015e8f70344620375aedf91afe7e7e7a4 Mon Sep 17 00:00:00 2001 +From: lijianglin <lijianglin2@huawei.com> +Date: Tue, 27 Jun 2023 20:15:49 +0800 +Subject: add GB18030-2022 charmap and test the entire GB18030 charmap [BZ + #30243] + +support GB18030-2022 after add and change some transcoding relationship +of GB18030-2022.Details are as follows: +add 25 transcoding relationship + UE81E 0x82359037 + UE826 0x82359038 + UE82B 0x82359039 + UE82C 0x82359130 + UE832 0x82359131 + UE843 0x82359132 + UE854 0x82359133 + UE864 0x82359134 + UE78D 0x84318236 + UE78F 0x84318237 + UE78E 0x84318238 + UE790 0x84318239 + UE791 0x84318330 + UE792 0x84318331 + UE793 0x84318332 + UE794 0x84318333 + UE795 0x84318334 + UE796 0x84318335 + UE816 0xfe51 + UE817 0xfe52 + UE818 0xfe53 + UE831 0xfe6c + UE83B 0xfe76 + UE855 0xfe91 +change 6 transcoding relationship + U20087 0x95329031 + U20089 0x95329033 + U200CC 0x95329730 + U215D7 0x9536b937 + U2298F 0x9630ba35 + U241FE 0x9635b630 +Test the entire GB18030 charmap, not only the Unicode BMP part. + +Co-authored-by: yangyanchao <yangyanchao6@huawei.com> +Co-authored-by: liqingqing <liqingqing3@huawei.com> +Co-authored-by: Bruno Haible <bruno@clisp.org> +Reviewed-by: Andreas Schwab <schwab@suse.de> +Reviewed-by: Mike FABIAN <mfabian@redhat.com> + +diff --git a/iconvdata/gb18030.c b/iconvdata/gb18030.c +index 9996a59eaf..be6cfe652c 100644 +--- a/iconvdata/gb18030.c ++++ b/iconvdata/gb18030.c +@@ -6009,49 +6009,50 @@ static const uint16_t __twobyte_to_ucs[] = + [0x5dc2] = 0xfa0e, [0x5dc3] = 0xfa0f, [0x5dc4] = 0xfa11, [0x5dc5] = 0xfa13, + [0x5dc6] = 0xfa14, [0x5dc7] = 0xfa18, [0x5dc8] = 0xfa1f, [0x5dc9] = 0xfa20, + [0x5dca] = 0xfa21, [0x5dcb] = 0xfa23, [0x5dcc] = 0xfa24, [0x5dcd] = 0xfa27, +- [0x5dce] = 0xfa28, [0x5dcf] = 0xfa29, [0x5dd0] = 0x2e81, [0x5dd4] = 0x2e84, +- [0x5dd5] = 0x3473, [0x5dd6] = 0x3447, [0x5dd7] = 0x2e88, [0x5dd8] = 0x2e8b, +- [0x5dd9] = 0x9fb4, [0x5dda] = 0x359e, [0x5ddb] = 0x361a, [0x5ddc] = 0x360e, +- [0x5ddd] = 0x2e8c, [0x5dde] = 0x2e97, [0x5ddf] = 0x396e, [0x5de0] = 0x3918, +- [0x5de1] = 0x9fb5, [0x5de2] = 0x39cf, [0x5de3] = 0x39df, [0x5de4] = 0x3a73, +- [0x5de5] = 0x39d0, [0x5de6] = 0x9fb6, [0x5de7] = 0x9fb7, [0x5de8] = 0x3b4e, +- [0x5de9] = 0x3c6e, [0x5dea] = 0x3ce0, [0x5deb] = 0x2ea7, [0x5ded] = 0x9fb8, ++ [0x5dce] = 0xfa28, [0x5dcf] = 0xfa29, [0x5dd0] = 0x2e81, [0x5dd1] = 0xe816, ++ [0x5dd2] = 0xe817, [0x5dd3] = 0xe818, [0x5dd4] = 0x2e84, [0x5dd5] = 0x3473, ++ [0x5dd6] = 0x3447, [0x5dd7] = 0x2e88, [0x5dd8] = 0x2e8b, [0x5dd9] = 0x9fb4, ++ [0x5dda] = 0x359e, [0x5ddb] = 0x361a, [0x5ddc] = 0x360e, [0x5ddd] = 0x2e8c, ++ [0x5dde] = 0x2e97, [0x5ddf] = 0x396e, [0x5de0] = 0x3918, [0x5de1] = 0x9fb5, ++ [0x5de2] = 0x39cf, [0x5de3] = 0x39df, [0x5de4] = 0x3a73, [0x5de5] = 0x39d0, ++ [0x5de6] = 0x9fb6, [0x5de7] = 0x9fb7, [0x5de8] = 0x3b4e, [0x5de9] = 0x3c6e, ++ [0x5dea] = 0x3ce0, [0x5deb] = 0x2ea7, [0x5dec] = 0xe831, [0x5ded] = 0x9fb8, + [0x5dee] = 0x2eaa, [0x5def] = 0x4056, 
[0x5df0] = 0x415f, [0x5df1] = 0x2eae, + [0x5df2] = 0x4337, [0x5df3] = 0x2eb3, [0x5df4] = 0x2eb6, [0x5df5] = 0x2eb7, +- [0x5df7] = 0x43b1, [0x5df8] = 0x43ac, [0x5df9] = 0x2ebb, [0x5dfa] = 0x43dd, +- [0x5dfb] = 0x44d6, [0x5dfc] = 0x4661, [0x5dfd] = 0x464c, [0x5dfe] = 0x9fb9, +- [0x5e00] = 0x4723, [0x5e01] = 0x4729, [0x5e02] = 0x477c, [0x5e03] = 0x478d, +- [0x5e04] = 0x2eca, [0x5e05] = 0x4947, [0x5e06] = 0x497a, [0x5e07] = 0x497d, +- [0x5e08] = 0x4982, [0x5e09] = 0x4983, [0x5e0a] = 0x4985, [0x5e0b] = 0x4986, +- [0x5e0c] = 0x499f, [0x5e0d] = 0x499b, [0x5e0e] = 0x49b7, [0x5e0f] = 0x49b6, +- [0x5e10] = 0x9fba, [0x5e12] = 0x4ca3, [0x5e13] = 0x4c9f, [0x5e14] = 0x4ca0, +- [0x5e15] = 0x4ca1, [0x5e16] = 0x4c77, [0x5e17] = 0x4ca2, [0x5e18] = 0x4d13, +- [0x5e19] = 0x4d14, [0x5e1a] = 0x4d15, [0x5e1b] = 0x4d16, [0x5e1c] = 0x4d17, +- [0x5e1d] = 0x4d18, [0x5e1e] = 0x4d19, [0x5e1f] = 0x4dae, [0x5e20] = 0x9fbb, +- [0x5e21] = 0xe468, [0x5e22] = 0xe469, [0x5e23] = 0xe46a, [0x5e24] = 0xe46b, +- [0x5e25] = 0xe46c, [0x5e26] = 0xe46d, [0x5e27] = 0xe46e, [0x5e28] = 0xe46f, +- [0x5e29] = 0xe470, [0x5e2a] = 0xe471, [0x5e2b] = 0xe472, [0x5e2c] = 0xe473, +- [0x5e2d] = 0xe474, [0x5e2e] = 0xe475, [0x5e2f] = 0xe476, [0x5e30] = 0xe477, +- [0x5e31] = 0xe478, [0x5e32] = 0xe479, [0x5e33] = 0xe47a, [0x5e34] = 0xe47b, +- [0x5e35] = 0xe47c, [0x5e36] = 0xe47d, [0x5e37] = 0xe47e, [0x5e38] = 0xe47f, +- [0x5e39] = 0xe480, [0x5e3a] = 0xe481, [0x5e3b] = 0xe482, [0x5e3c] = 0xe483, +- [0x5e3d] = 0xe484, [0x5e3e] = 0xe485, [0x5e3f] = 0xe486, [0x5e40] = 0xe487, +- [0x5e41] = 0xe488, [0x5e42] = 0xe489, [0x5e43] = 0xe48a, [0x5e44] = 0xe48b, +- [0x5e45] = 0xe48c, [0x5e46] = 0xe48d, [0x5e47] = 0xe48e, [0x5e48] = 0xe48f, +- [0x5e49] = 0xe490, [0x5e4a] = 0xe491, [0x5e4b] = 0xe492, [0x5e4c] = 0xe493, +- [0x5e4d] = 0xe494, [0x5e4e] = 0xe495, [0x5e4f] = 0xe496, [0x5e50] = 0xe497, +- [0x5e51] = 0xe498, [0x5e52] = 0xe499, [0x5e53] = 0xe49a, [0x5e54] = 0xe49b, +- [0x5e55] = 0xe49c, [0x5e56] = 0xe49d, [0x5e57] = 0xe49e, [0x5e58] = 0xe49f, +- [0x5e59] = 0xe4a0, [0x5e5a] = 0xe4a1, [0x5e5b] = 0xe4a2, [0x5e5c] = 0xe4a3, +- [0x5e5d] = 0xe4a4, [0x5e5e] = 0xe4a5, [0x5e5f] = 0xe4a6, [0x5e60] = 0xe4a7, +- [0x5e61] = 0xe4a8, [0x5e62] = 0xe4a9, [0x5e63] = 0xe4aa, [0x5e64] = 0xe4ab, +- [0x5e65] = 0xe4ac, [0x5e66] = 0xe4ad, [0x5e67] = 0xe4ae, [0x5e68] = 0xe4af, +- [0x5e69] = 0xe4b0, [0x5e6a] = 0xe4b1, [0x5e6b] = 0xe4b2, [0x5e6c] = 0xe4b3, +- [0x5e6d] = 0xe4b4, [0x5e6e] = 0xe4b5, [0x5e6f] = 0xe4b6, [0x5e70] = 0xe4b7, +- [0x5e71] = 0xe4b8, [0x5e72] = 0xe4b9, [0x5e73] = 0xe4ba, [0x5e74] = 0xe4bb, +- [0x5e75] = 0xe4bc, [0x5e76] = 0xe4bd, [0x5e77] = 0xe4be, [0x5e78] = 0xe4bf, +- [0x5e79] = 0xe4c0, [0x5e7a] = 0xe4c1, [0x5e7b] = 0xe4c2, [0x5e7c] = 0xe4c3, +- [0x5e7d] = 0xe4c4, [0x5e7e] = 0xe4c5, ++ [0x5df6] = 0xe83b, [0x5df7] = 0x43b1, [0x5df8] = 0x43ac, [0x5df9] = 0x2ebb, ++ [0x5dfa] = 0x43dd, [0x5dfb] = 0x44d6, [0x5dfc] = 0x4661, [0x5dfd] = 0x464c, ++ [0x5dfe] = 0x9fb9, [0x5e00] = 0x4723, [0x5e01] = 0x4729, [0x5e02] = 0x477c, ++ [0x5e03] = 0x478d, [0x5e04] = 0x2eca, [0x5e05] = 0x4947, [0x5e06] = 0x497a, ++ [0x5e07] = 0x497d, [0x5e08] = 0x4982, [0x5e09] = 0x4983, [0x5e0a] = 0x4985, ++ [0x5e0b] = 0x4986, [0x5e0c] = 0x499f, [0x5e0d] = 0x499b, [0x5e0e] = 0x49b7, ++ [0x5e0f] = 0x49b6, [0x5e10] = 0x9fba, [0x5e11] = 0xe855, [0x5e12] = 0x4ca3, ++ [0x5e13] = 0x4c9f, [0x5e14] = 0x4ca0, [0x5e15] = 0x4ca1, [0x5e16] = 0x4c77, ++ [0x5e17] = 0x4ca2, [0x5e18] = 0x4d13, [0x5e19] = 0x4d14, [0x5e1a] = 0x4d15, ++ [0x5e1b] = 0x4d16, [0x5e1c] = 0x4d17, [0x5e1d] = 0x4d18, [0x5e1e] = 0x4d19, 
++ [0x5e1f] = 0x4dae, [0x5e20] = 0x9fbb, [0x5e21] = 0xe468, [0x5e22] = 0xe469, ++ [0x5e23] = 0xe46a, [0x5e24] = 0xe46b, [0x5e25] = 0xe46c, [0x5e26] = 0xe46d, ++ [0x5e27] = 0xe46e, [0x5e28] = 0xe46f, [0x5e29] = 0xe470, [0x5e2a] = 0xe471, ++ [0x5e2b] = 0xe472, [0x5e2c] = 0xe473, [0x5e2d] = 0xe474, [0x5e2e] = 0xe475, ++ [0x5e2f] = 0xe476, [0x5e30] = 0xe477, [0x5e31] = 0xe478, [0x5e32] = 0xe479, ++ [0x5e33] = 0xe47a, [0x5e34] = 0xe47b, [0x5e35] = 0xe47c, [0x5e36] = 0xe47d, ++ [0x5e37] = 0xe47e, [0x5e38] = 0xe47f, [0x5e39] = 0xe480, [0x5e3a] = 0xe481, ++ [0x5e3b] = 0xe482, [0x5e3c] = 0xe483, [0x5e3d] = 0xe484, [0x5e3e] = 0xe485, ++ [0x5e3f] = 0xe486, [0x5e40] = 0xe487, [0x5e41] = 0xe488, [0x5e42] = 0xe489, ++ [0x5e43] = 0xe48a, [0x5e44] = 0xe48b, [0x5e45] = 0xe48c, [0x5e46] = 0xe48d, ++ [0x5e47] = 0xe48e, [0x5e48] = 0xe48f, [0x5e49] = 0xe490, [0x5e4a] = 0xe491, ++ [0x5e4b] = 0xe492, [0x5e4c] = 0xe493, [0x5e4d] = 0xe494, [0x5e4e] = 0xe495, ++ [0x5e4f] = 0xe496, [0x5e50] = 0xe497, [0x5e51] = 0xe498, [0x5e52] = 0xe499, ++ [0x5e53] = 0xe49a, [0x5e54] = 0xe49b, [0x5e55] = 0xe49c, [0x5e56] = 0xe49d, ++ [0x5e57] = 0xe49e, [0x5e58] = 0xe49f, [0x5e59] = 0xe4a0, [0x5e5a] = 0xe4a1, ++ [0x5e5b] = 0xe4a2, [0x5e5c] = 0xe4a3, [0x5e5d] = 0xe4a4, [0x5e5e] = 0xe4a5, ++ [0x5e5f] = 0xe4a6, [0x5e60] = 0xe4a7, [0x5e61] = 0xe4a8, [0x5e62] = 0xe4a9, ++ [0x5e63] = 0xe4aa, [0x5e64] = 0xe4ab, [0x5e65] = 0xe4ac, [0x5e66] = 0xe4ad, ++ [0x5e67] = 0xe4ae, [0x5e68] = 0xe4af, [0x5e69] = 0xe4b0, [0x5e6a] = 0xe4b1, ++ [0x5e6b] = 0xe4b2, [0x5e6c] = 0xe4b3, [0x5e6d] = 0xe4b4, [0x5e6e] = 0xe4b5, ++ [0x5e6f] = 0xe4b6, [0x5e70] = 0xe4b7, [0x5e71] = 0xe4b8, [0x5e72] = 0xe4b9, ++ [0x5e73] = 0xe4ba, [0x5e74] = 0xe4bb, [0x5e75] = 0xe4bc, [0x5e76] = 0xe4bd, ++ [0x5e77] = 0xe4be, [0x5e78] = 0xe4bf, [0x5e79] = 0xe4c0, [0x5e7a] = 0xe4c1, ++ [0x5e7b] = 0xe4c2, [0x5e7c] = 0xe4c3, [0x5e7d] = 0xe4c4, [0x5e7e] = 0xe4c5, + }; + + /* Table for GB18030 -> UCS-4, containing the four-byte characters only, +@@ -8680,7 +8681,9 @@ static const uint16_t __fourbyte_to_ucs[0x99e2 - 6637 - 2110 - 14404 - 4295] = + [0x2838] = 0x9fa6, [0x2839] = 0x9fa7, [0x283a] = 0x9fa8, [0x283b] = 0x9fa9, + [0x283c] = 0x9faa, [0x283d] = 0x9fab, [0x283e] = 0x9fac, [0x283f] = 0x9fad, + [0x2840] = 0x9fae, [0x2841] = 0x9faf, [0x2842] = 0x9fb0, [0x2843] = 0x9fb1, +- [0x2844] = 0x9fb2, [0x2845] = 0x9fb3, [0x284e] = 0xe76c, [0x284f] = 0xe7c8, ++ [0x2844] = 0x9fb2, [0x2845] = 0x9fb3, [0x2846] = 0xe81e, [0x2847] = 0xe826, ++ [0x2848] = 0xe82b, [0x2849] = 0xe82c, [0x284a] = 0xe832, [0x284b] = 0xe843, ++ [0x284c] = 0xe854, [0x284d] = 0xe864, [0x284e] = 0xe76c, [0x284f] = 0xe7c8, + [0x2850] = 0xe7e7, [0x2851] = 0xe7e8, [0x2852] = 0xe7e9, [0x2853] = 0xe7ea, + [0x2854] = 0xe7eb, [0x2855] = 0xe7ec, [0x2856] = 0xe7ed, [0x2857] = 0xe7ee, + [0x2858] = 0xe7ef, [0x2859] = 0xe7f0, [0x285a] = 0xe7f1, [0x285b] = 0xe7f2, +@@ -9008,84 +9011,86 @@ static const uint16_t __fourbyte_to_ucs[0x99e2 - 6637 - 2110 - 14404 - 4295] = + [0x2d60] = 0xfe02, [0x2d61] = 0xfe03, [0x2d62] = 0xfe04, [0x2d63] = 0xfe05, + [0x2d64] = 0xfe06, [0x2d65] = 0xfe07, [0x2d66] = 0xfe08, [0x2d67] = 0xfe09, + [0x2d68] = 0xfe0a, [0x2d69] = 0xfe0b, [0x2d6a] = 0xfe0c, [0x2d6b] = 0xfe0d, +- [0x2d6c] = 0xfe0e, [0x2d6d] = 0xfe0f, [0x2d78] = 0xfe1a, [0x2d79] = 0xfe1b, +- [0x2d7a] = 0xfe1c, [0x2d7b] = 0xfe1d, [0x2d7c] = 0xfe1e, [0x2d7d] = 0xfe1f, +- [0x2d7e] = 0xfe20, [0x2d7f] = 0xfe21, [0x2d80] = 0xfe22, [0x2d81] = 0xfe23, +- [0x2d82] = 0xfe24, [0x2d83] = 0xfe25, [0x2d84] = 0xfe26, [0x2d85] = 0xfe27, +- [0x2d86] = 0xfe28, [0x2d87] = 
0xfe29, [0x2d88] = 0xfe2a, [0x2d89] = 0xfe2b, +- [0x2d8a] = 0xfe2c, [0x2d8b] = 0xfe2d, [0x2d8c] = 0xfe2e, [0x2d8d] = 0xfe2f, +- [0x2d8e] = 0xfe32, [0x2d8f] = 0xfe45, [0x2d90] = 0xfe46, [0x2d91] = 0xfe47, +- [0x2d92] = 0xfe48, [0x2d93] = 0xfe53, [0x2d94] = 0xfe58, [0x2d95] = 0xfe67, +- [0x2d96] = 0xfe6c, [0x2d97] = 0xfe6d, [0x2d98] = 0xfe6e, [0x2d99] = 0xfe6f, +- [0x2d9a] = 0xfe70, [0x2d9b] = 0xfe71, [0x2d9c] = 0xfe72, [0x2d9d] = 0xfe73, +- [0x2d9e] = 0xfe74, [0x2d9f] = 0xfe75, [0x2da0] = 0xfe76, [0x2da1] = 0xfe77, +- [0x2da2] = 0xfe78, [0x2da3] = 0xfe79, [0x2da4] = 0xfe7a, [0x2da5] = 0xfe7b, +- [0x2da6] = 0xfe7c, [0x2da7] = 0xfe7d, [0x2da8] = 0xfe7e, [0x2da9] = 0xfe7f, +- [0x2daa] = 0xfe80, [0x2dab] = 0xfe81, [0x2dac] = 0xfe82, [0x2dad] = 0xfe83, +- [0x2dae] = 0xfe84, [0x2daf] = 0xfe85, [0x2db0] = 0xfe86, [0x2db1] = 0xfe87, +- [0x2db2] = 0xfe88, [0x2db3] = 0xfe89, [0x2db4] = 0xfe8a, [0x2db5] = 0xfe8b, +- [0x2db6] = 0xfe8c, [0x2db7] = 0xfe8d, [0x2db8] = 0xfe8e, [0x2db9] = 0xfe8f, +- [0x2dba] = 0xfe90, [0x2dbb] = 0xfe91, [0x2dbc] = 0xfe92, [0x2dbd] = 0xfe93, +- [0x2dbe] = 0xfe94, [0x2dbf] = 0xfe95, [0x2dc0] = 0xfe96, [0x2dc1] = 0xfe97, +- [0x2dc2] = 0xfe98, [0x2dc3] = 0xfe99, [0x2dc4] = 0xfe9a, [0x2dc5] = 0xfe9b, +- [0x2dc6] = 0xfe9c, [0x2dc7] = 0xfe9d, [0x2dc8] = 0xfe9e, [0x2dc9] = 0xfe9f, +- [0x2dca] = 0xfea0, [0x2dcb] = 0xfea1, [0x2dcc] = 0xfea2, [0x2dcd] = 0xfea3, +- [0x2dce] = 0xfea4, [0x2dcf] = 0xfea5, [0x2dd0] = 0xfea6, [0x2dd1] = 0xfea7, +- [0x2dd2] = 0xfea8, [0x2dd3] = 0xfea9, [0x2dd4] = 0xfeaa, [0x2dd5] = 0xfeab, +- [0x2dd6] = 0xfeac, [0x2dd7] = 0xfead, [0x2dd8] = 0xfeae, [0x2dd9] = 0xfeaf, +- [0x2dda] = 0xfeb0, [0x2ddb] = 0xfeb1, [0x2ddc] = 0xfeb2, [0x2ddd] = 0xfeb3, +- [0x2dde] = 0xfeb4, [0x2ddf] = 0xfeb5, [0x2de0] = 0xfeb6, [0x2de1] = 0xfeb7, +- [0x2de2] = 0xfeb8, [0x2de3] = 0xfeb9, [0x2de4] = 0xfeba, [0x2de5] = 0xfebb, +- [0x2de6] = 0xfebc, [0x2de7] = 0xfebd, [0x2de8] = 0xfebe, [0x2de9] = 0xfebf, +- [0x2dea] = 0xfec0, [0x2deb] = 0xfec1, [0x2dec] = 0xfec2, [0x2ded] = 0xfec3, +- [0x2dee] = 0xfec4, [0x2def] = 0xfec5, [0x2df0] = 0xfec6, [0x2df1] = 0xfec7, +- [0x2df2] = 0xfec8, [0x2df3] = 0xfec9, [0x2df4] = 0xfeca, [0x2df5] = 0xfecb, +- [0x2df6] = 0xfecc, [0x2df7] = 0xfecd, [0x2df8] = 0xfece, [0x2df9] = 0xfecf, +- [0x2dfa] = 0xfed0, [0x2dfb] = 0xfed1, [0x2dfc] = 0xfed2, [0x2dfd] = 0xfed3, +- [0x2dfe] = 0xfed4, [0x2dff] = 0xfed5, [0x2e00] = 0xfed6, [0x2e01] = 0xfed7, +- [0x2e02] = 0xfed8, [0x2e03] = 0xfed9, [0x2e04] = 0xfeda, [0x2e05] = 0xfedb, +- [0x2e06] = 0xfedc, [0x2e07] = 0xfedd, [0x2e08] = 0xfede, [0x2e09] = 0xfedf, +- [0x2e0a] = 0xfee0, [0x2e0b] = 0xfee1, [0x2e0c] = 0xfee2, [0x2e0d] = 0xfee3, +- [0x2e0e] = 0xfee4, [0x2e0f] = 0xfee5, [0x2e10] = 0xfee6, [0x2e11] = 0xfee7, +- [0x2e12] = 0xfee8, [0x2e13] = 0xfee9, [0x2e14] = 0xfeea, [0x2e15] = 0xfeeb, +- [0x2e16] = 0xfeec, [0x2e17] = 0xfeed, [0x2e18] = 0xfeee, [0x2e19] = 0xfeef, +- [0x2e1a] = 0xfef0, [0x2e1b] = 0xfef1, [0x2e1c] = 0xfef2, [0x2e1d] = 0xfef3, +- [0x2e1e] = 0xfef4, [0x2e1f] = 0xfef5, [0x2e20] = 0xfef6, [0x2e21] = 0xfef7, +- [0x2e22] = 0xfef8, [0x2e23] = 0xfef9, [0x2e24] = 0xfefa, [0x2e25] = 0xfefb, +- [0x2e26] = 0xfefc, [0x2e27] = 0xfefd, [0x2e28] = 0xfefe, [0x2e29] = 0xfeff, +- [0x2e2a] = 0xff00, [0x2e2b] = 0xff5f, [0x2e2c] = 0xff60, [0x2e2d] = 0xff61, +- [0x2e2e] = 0xff62, [0x2e2f] = 0xff63, [0x2e30] = 0xff64, [0x2e31] = 0xff65, +- [0x2e32] = 0xff66, [0x2e33] = 0xff67, [0x2e34] = 0xff68, [0x2e35] = 0xff69, +- [0x2e36] = 0xff6a, [0x2e37] = 0xff6b, [0x2e38] = 0xff6c, [0x2e39] = 0xff6d, +- [0x2e3a] = 0xff6e, [0x2e3b] = 
0xff6f, [0x2e3c] = 0xff70, [0x2e3d] = 0xff71, +- [0x2e3e] = 0xff72, [0x2e3f] = 0xff73, [0x2e40] = 0xff74, [0x2e41] = 0xff75, +- [0x2e42] = 0xff76, [0x2e43] = 0xff77, [0x2e44] = 0xff78, [0x2e45] = 0xff79, +- [0x2e46] = 0xff7a, [0x2e47] = 0xff7b, [0x2e48] = 0xff7c, [0x2e49] = 0xff7d, +- [0x2e4a] = 0xff7e, [0x2e4b] = 0xff7f, [0x2e4c] = 0xff80, [0x2e4d] = 0xff81, +- [0x2e4e] = 0xff82, [0x2e4f] = 0xff83, [0x2e50] = 0xff84, [0x2e51] = 0xff85, +- [0x2e52] = 0xff86, [0x2e53] = 0xff87, [0x2e54] = 0xff88, [0x2e55] = 0xff89, +- [0x2e56] = 0xff8a, [0x2e57] = 0xff8b, [0x2e58] = 0xff8c, [0x2e59] = 0xff8d, +- [0x2e5a] = 0xff8e, [0x2e5b] = 0xff8f, [0x2e5c] = 0xff90, [0x2e5d] = 0xff91, +- [0x2e5e] = 0xff92, [0x2e5f] = 0xff93, [0x2e60] = 0xff94, [0x2e61] = 0xff95, +- [0x2e62] = 0xff96, [0x2e63] = 0xff97, [0x2e64] = 0xff98, [0x2e65] = 0xff99, +- [0x2e66] = 0xff9a, [0x2e67] = 0xff9b, [0x2e68] = 0xff9c, [0x2e69] = 0xff9d, +- [0x2e6a] = 0xff9e, [0x2e6b] = 0xff9f, [0x2e6c] = 0xffa0, [0x2e6d] = 0xffa1, +- [0x2e6e] = 0xffa2, [0x2e6f] = 0xffa3, [0x2e70] = 0xffa4, [0x2e71] = 0xffa5, +- [0x2e72] = 0xffa6, [0x2e73] = 0xffa7, [0x2e74] = 0xffa8, [0x2e75] = 0xffa9, +- [0x2e76] = 0xffaa, [0x2e77] = 0xffab, [0x2e78] = 0xffac, [0x2e79] = 0xffad, +- [0x2e7a] = 0xffae, [0x2e7b] = 0xffaf, [0x2e7c] = 0xffb0, [0x2e7d] = 0xffb1, +- [0x2e7e] = 0xffb2, [0x2e7f] = 0xffb3, [0x2e80] = 0xffb4, [0x2e81] = 0xffb5, +- [0x2e82] = 0xffb6, [0x2e83] = 0xffb7, [0x2e84] = 0xffb8, [0x2e85] = 0xffb9, +- [0x2e86] = 0xffba, [0x2e87] = 0xffbb, [0x2e88] = 0xffbc, [0x2e89] = 0xffbd, +- [0x2e8a] = 0xffbe, [0x2e8b] = 0xffbf, [0x2e8c] = 0xffc0, [0x2e8d] = 0xffc1, +- [0x2e8e] = 0xffc2, [0x2e8f] = 0xffc3, [0x2e90] = 0xffc4, [0x2e91] = 0xffc5, +- [0x2e92] = 0xffc6, [0x2e93] = 0xffc7, [0x2e94] = 0xffc8, [0x2e95] = 0xffc9, +- [0x2e96] = 0xffca, [0x2e97] = 0xffcb, [0x2e98] = 0xffcc, [0x2e99] = 0xffcd, +- [0x2e9a] = 0xffce, [0x2e9b] = 0xffcf, [0x2e9c] = 0xffd0, [0x2e9d] = 0xffd1, +- [0x2e9e] = 0xffd2, [0x2e9f] = 0xffd3, [0x2ea0] = 0xffd4, [0x2ea1] = 0xffd5, +- [0x2ea2] = 0xffd6, [0x2ea3] = 0xffd7, [0x2ea4] = 0xffd8, [0x2ea5] = 0xffd9, +- [0x2ea6] = 0xffda, [0x2ea7] = 0xffdb, [0x2ea8] = 0xffdc, [0x2ea9] = 0xffdd, +- [0x2eaa] = 0xffde, [0x2eab] = 0xffdf, ++ [0x2d6c] = 0xfe0e, [0x2d6d] = 0xfe0f, [0x2d6e] = 0xe78d, [0x2d6f] = 0xe78f, ++ [0x2d70] = 0xe78e, [0x2d71] = 0xe790, [0x2d72] = 0xe791, [0x2d73] = 0xe792, ++ [0x2d74] = 0xe793, [0x2d75] = 0xe794, [0x2d76] = 0xe795, [0x2d77] = 0xe796, ++ [0x2d78] = 0xfe1a, [0x2d79] = 0xfe1b, [0x2d7a] = 0xfe1c, [0x2d7b] = 0xfe1d, ++ [0x2d7c] = 0xfe1e, [0x2d7d] = 0xfe1f, [0x2d7e] = 0xfe20, [0x2d7f] = 0xfe21, ++ [0x2d80] = 0xfe22, [0x2d81] = 0xfe23, [0x2d82] = 0xfe24, [0x2d83] = 0xfe25, ++ [0x2d84] = 0xfe26, [0x2d85] = 0xfe27, [0x2d86] = 0xfe28, [0x2d87] = 0xfe29, ++ [0x2d88] = 0xfe2a, [0x2d89] = 0xfe2b, [0x2d8a] = 0xfe2c, [0x2d8b] = 0xfe2d, ++ [0x2d8c] = 0xfe2e, [0x2d8d] = 0xfe2f, [0x2d8e] = 0xfe32, [0x2d8f] = 0xfe45, ++ [0x2d90] = 0xfe46, [0x2d91] = 0xfe47, [0x2d92] = 0xfe48, [0x2d93] = 0xfe53, ++ [0x2d94] = 0xfe58, [0x2d95] = 0xfe67, [0x2d96] = 0xfe6c, [0x2d97] = 0xfe6d, ++ [0x2d98] = 0xfe6e, [0x2d99] = 0xfe6f, [0x2d9a] = 0xfe70, [0x2d9b] = 0xfe71, ++ [0x2d9c] = 0xfe72, [0x2d9d] = 0xfe73, [0x2d9e] = 0xfe74, [0x2d9f] = 0xfe75, ++ [0x2da0] = 0xfe76, [0x2da1] = 0xfe77, [0x2da2] = 0xfe78, [0x2da3] = 0xfe79, ++ [0x2da4] = 0xfe7a, [0x2da5] = 0xfe7b, [0x2da6] = 0xfe7c, [0x2da7] = 0xfe7d, ++ [0x2da8] = 0xfe7e, [0x2da9] = 0xfe7f, [0x2daa] = 0xfe80, [0x2dab] = 0xfe81, ++ [0x2dac] = 0xfe82, [0x2dad] = 0xfe83, [0x2dae] = 0xfe84, [0x2daf] = 
0xfe85, ++ [0x2db0] = 0xfe86, [0x2db1] = 0xfe87, [0x2db2] = 0xfe88, [0x2db3] = 0xfe89, ++ [0x2db4] = 0xfe8a, [0x2db5] = 0xfe8b, [0x2db6] = 0xfe8c, [0x2db7] = 0xfe8d, ++ [0x2db8] = 0xfe8e, [0x2db9] = 0xfe8f, [0x2dba] = 0xfe90, [0x2dbb] = 0xfe91, ++ [0x2dbc] = 0xfe92, [0x2dbd] = 0xfe93, [0x2dbe] = 0xfe94, [0x2dbf] = 0xfe95, ++ [0x2dc0] = 0xfe96, [0x2dc1] = 0xfe97, [0x2dc2] = 0xfe98, [0x2dc3] = 0xfe99, ++ [0x2dc4] = 0xfe9a, [0x2dc5] = 0xfe9b, [0x2dc6] = 0xfe9c, [0x2dc7] = 0xfe9d, ++ [0x2dc8] = 0xfe9e, [0x2dc9] = 0xfe9f, [0x2dca] = 0xfea0, [0x2dcb] = 0xfea1, ++ [0x2dcc] = 0xfea2, [0x2dcd] = 0xfea3, [0x2dce] = 0xfea4, [0x2dcf] = 0xfea5, ++ [0x2dd0] = 0xfea6, [0x2dd1] = 0xfea7, [0x2dd2] = 0xfea8, [0x2dd3] = 0xfea9, ++ [0x2dd4] = 0xfeaa, [0x2dd5] = 0xfeab, [0x2dd6] = 0xfeac, [0x2dd7] = 0xfead, ++ [0x2dd8] = 0xfeae, [0x2dd9] = 0xfeaf, [0x2dda] = 0xfeb0, [0x2ddb] = 0xfeb1, ++ [0x2ddc] = 0xfeb2, [0x2ddd] = 0xfeb3, [0x2dde] = 0xfeb4, [0x2ddf] = 0xfeb5, ++ [0x2de0] = 0xfeb6, [0x2de1] = 0xfeb7, [0x2de2] = 0xfeb8, [0x2de3] = 0xfeb9, ++ [0x2de4] = 0xfeba, [0x2de5] = 0xfebb, [0x2de6] = 0xfebc, [0x2de7] = 0xfebd, ++ [0x2de8] = 0xfebe, [0x2de9] = 0xfebf, [0x2dea] = 0xfec0, [0x2deb] = 0xfec1, ++ [0x2dec] = 0xfec2, [0x2ded] = 0xfec3, [0x2dee] = 0xfec4, [0x2def] = 0xfec5, ++ [0x2df0] = 0xfec6, [0x2df1] = 0xfec7, [0x2df2] = 0xfec8, [0x2df3] = 0xfec9, ++ [0x2df4] = 0xfeca, [0x2df5] = 0xfecb, [0x2df6] = 0xfecc, [0x2df7] = 0xfecd, ++ [0x2df8] = 0xfece, [0x2df9] = 0xfecf, [0x2dfa] = 0xfed0, [0x2dfb] = 0xfed1, ++ [0x2dfc] = 0xfed2, [0x2dfd] = 0xfed3, [0x2dfe] = 0xfed4, [0x2dff] = 0xfed5, ++ [0x2e00] = 0xfed6, [0x2e01] = 0xfed7, [0x2e02] = 0xfed8, [0x2e03] = 0xfed9, ++ [0x2e04] = 0xfeda, [0x2e05] = 0xfedb, [0x2e06] = 0xfedc, [0x2e07] = 0xfedd, ++ [0x2e08] = 0xfede, [0x2e09] = 0xfedf, [0x2e0a] = 0xfee0, [0x2e0b] = 0xfee1, ++ [0x2e0c] = 0xfee2, [0x2e0d] = 0xfee3, [0x2e0e] = 0xfee4, [0x2e0f] = 0xfee5, ++ [0x2e10] = 0xfee6, [0x2e11] = 0xfee7, [0x2e12] = 0xfee8, [0x2e13] = 0xfee9, ++ [0x2e14] = 0xfeea, [0x2e15] = 0xfeeb, [0x2e16] = 0xfeec, [0x2e17] = 0xfeed, ++ [0x2e18] = 0xfeee, [0x2e19] = 0xfeef, [0x2e1a] = 0xfef0, [0x2e1b] = 0xfef1, ++ [0x2e1c] = 0xfef2, [0x2e1d] = 0xfef3, [0x2e1e] = 0xfef4, [0x2e1f] = 0xfef5, ++ [0x2e20] = 0xfef6, [0x2e21] = 0xfef7, [0x2e22] = 0xfef8, [0x2e23] = 0xfef9, ++ [0x2e24] = 0xfefa, [0x2e25] = 0xfefb, [0x2e26] = 0xfefc, [0x2e27] = 0xfefd, ++ [0x2e28] = 0xfefe, [0x2e29] = 0xfeff, [0x2e2a] = 0xff00, [0x2e2b] = 0xff5f, ++ [0x2e2c] = 0xff60, [0x2e2d] = 0xff61, [0x2e2e] = 0xff62, [0x2e2f] = 0xff63, ++ [0x2e30] = 0xff64, [0x2e31] = 0xff65, [0x2e32] = 0xff66, [0x2e33] = 0xff67, ++ [0x2e34] = 0xff68, [0x2e35] = 0xff69, [0x2e36] = 0xff6a, [0x2e37] = 0xff6b, ++ [0x2e38] = 0xff6c, [0x2e39] = 0xff6d, [0x2e3a] = 0xff6e, [0x2e3b] = 0xff6f, ++ [0x2e3c] = 0xff70, [0x2e3d] = 0xff71, [0x2e3e] = 0xff72, [0x2e3f] = 0xff73, ++ [0x2e40] = 0xff74, [0x2e41] = 0xff75, [0x2e42] = 0xff76, [0x2e43] = 0xff77, ++ [0x2e44] = 0xff78, [0x2e45] = 0xff79, [0x2e46] = 0xff7a, [0x2e47] = 0xff7b, ++ [0x2e48] = 0xff7c, [0x2e49] = 0xff7d, [0x2e4a] = 0xff7e, [0x2e4b] = 0xff7f, ++ [0x2e4c] = 0xff80, [0x2e4d] = 0xff81, [0x2e4e] = 0xff82, [0x2e4f] = 0xff83, ++ [0x2e50] = 0xff84, [0x2e51] = 0xff85, [0x2e52] = 0xff86, [0x2e53] = 0xff87, ++ [0x2e54] = 0xff88, [0x2e55] = 0xff89, [0x2e56] = 0xff8a, [0x2e57] = 0xff8b, ++ [0x2e58] = 0xff8c, [0x2e59] = 0xff8d, [0x2e5a] = 0xff8e, [0x2e5b] = 0xff8f, ++ [0x2e5c] = 0xff90, [0x2e5d] = 0xff91, [0x2e5e] = 0xff92, [0x2e5f] = 0xff93, ++ [0x2e60] = 0xff94, [0x2e61] = 0xff95, [0x2e62] = 0xff96, [0x2e63] = 
0xff97, ++ [0x2e64] = 0xff98, [0x2e65] = 0xff99, [0x2e66] = 0xff9a, [0x2e67] = 0xff9b, ++ [0x2e68] = 0xff9c, [0x2e69] = 0xff9d, [0x2e6a] = 0xff9e, [0x2e6b] = 0xff9f, ++ [0x2e6c] = 0xffa0, [0x2e6d] = 0xffa1, [0x2e6e] = 0xffa2, [0x2e6f] = 0xffa3, ++ [0x2e70] = 0xffa4, [0x2e71] = 0xffa5, [0x2e72] = 0xffa6, [0x2e73] = 0xffa7, ++ [0x2e74] = 0xffa8, [0x2e75] = 0xffa9, [0x2e76] = 0xffaa, [0x2e77] = 0xffab, ++ [0x2e78] = 0xffac, [0x2e79] = 0xffad, [0x2e7a] = 0xffae, [0x2e7b] = 0xffaf, ++ [0x2e7c] = 0xffb0, [0x2e7d] = 0xffb1, [0x2e7e] = 0xffb2, [0x2e7f] = 0xffb3, ++ [0x2e80] = 0xffb4, [0x2e81] = 0xffb5, [0x2e82] = 0xffb6, [0x2e83] = 0xffb7, ++ [0x2e84] = 0xffb8, [0x2e85] = 0xffb9, [0x2e86] = 0xffba, [0x2e87] = 0xffbb, ++ [0x2e88] = 0xffbc, [0x2e89] = 0xffbd, [0x2e8a] = 0xffbe, [0x2e8b] = 0xffbf, ++ [0x2e8c] = 0xffc0, [0x2e8d] = 0xffc1, [0x2e8e] = 0xffc2, [0x2e8f] = 0xffc3, ++ [0x2e90] = 0xffc4, [0x2e91] = 0xffc5, [0x2e92] = 0xffc6, [0x2e93] = 0xffc7, ++ [0x2e94] = 0xffc8, [0x2e95] = 0xffc9, [0x2e96] = 0xffca, [0x2e97] = 0xffcb, ++ [0x2e98] = 0xffcc, [0x2e99] = 0xffcd, [0x2e9a] = 0xffce, [0x2e9b] = 0xffcf, ++ [0x2e9c] = 0xffd0, [0x2e9d] = 0xffd1, [0x2e9e] = 0xffd2, [0x2e9f] = 0xffd3, ++ [0x2ea0] = 0xffd4, [0x2ea1] = 0xffd5, [0x2ea2] = 0xffd6, [0x2ea3] = 0xffd7, ++ [0x2ea4] = 0xffd8, [0x2ea5] = 0xffd9, [0x2ea6] = 0xffda, [0x2ea7] = 0xffdb, ++ [0x2ea8] = 0xffdc, [0x2ea9] = 0xffdd, [0x2eaa] = 0xffde, [0x2eab] = 0xffdf, + }; + + /* Table for UCS-4 -> GB18030, for the range U+0080..U+9FBB. +@@ -23437,71 +23442,79 @@ static const unsigned char __ucs_to_gb18030_tab2[][2] = + [0x0783] = "\xa5\xfd", [0x0784] = "\xa5\xfe", [0x0785] = "\xa6\xb9", + [0x0786] = "\xa6\xba", [0x0787] = "\xa6\xbb", [0x0788] = "\xa6\xbc", + [0x0789] = "\xa6\xbd", [0x078a] = "\xa6\xbe", [0x078b] = "\xa6\xbf", +- [0x078c] = "\xa6\xc0", [0x0797] = "\xa6\xf6", [0x0798] = "\xa6\xf7", +- [0x0799] = "\xa6\xf8", [0x079a] = "\xa6\xf9", [0x079b] = "\xa6\xfa", +- [0x079c] = "\xa6\xfb", [0x079d] = "\xa6\xfc", [0x079e] = "\xa6\xfd", +- [0x079f] = "\xa6\xfe", [0x07a0] = "\xa7\xc2", [0x07a1] = "\xa7\xc3", +- [0x07a2] = "\xa7\xc4", [0x07a3] = "\xa7\xc5", [0x07a4] = "\xa7\xc6", +- [0x07a5] = "\xa7\xc7", [0x07a6] = "\xa7\xc8", [0x07a7] = "\xa7\xc9", +- [0x07a8] = "\xa7\xca", [0x07a9] = "\xa7\xcb", [0x07aa] = "\xa7\xcc", +- [0x07ab] = "\xa7\xcd", [0x07ac] = "\xa7\xce", [0x07ad] = "\xa7\xcf", +- [0x07ae] = "\xa7\xd0", [0x07af] = "\xa7\xf2", [0x07b0] = "\xa7\xf3", +- [0x07b1] = "\xa7\xf4", [0x07b2] = "\xa7\xf5", [0x07b3] = "\xa7\xf6", +- [0x07b4] = "\xa7\xf7", [0x07b5] = "\xa7\xf8", [0x07b6] = "\xa7\xf9", +- [0x07b7] = "\xa7\xfa", [0x07b8] = "\xa7\xfb", [0x07b9] = "\xa7\xfc", +- [0x07ba] = "\xa7\xfd", [0x07bb] = "\xa7\xfe", [0x07bc] = "\xa8\x96", +- [0x07bd] = "\xa8\x97", [0x07be] = "\xa8\x98", [0x07bf] = "\xa8\x99", +- [0x07c0] = "\xa8\x9a", [0x07c1] = "\xa8\x9b", [0x07c2] = "\xa8\x9c", +- [0x07c3] = "\xa8\x9d", [0x07c4] = "\xa8\x9e", [0x07c5] = "\xa8\x9f", +- [0x07c6] = "\xa8\xa0", [0x07c7] = "\x00\x01", [0x07c8] = "\x65\x9e", +- [0x07c9] = "\xa8\xc1", [0x07ca] = "\xa8\xc2", [0x07cb] = "\xa8\xc3", +- [0x07cc] = "\xa8\xc4", [0x07cd] = "\xa8\xea", [0x07ce] = "\xa8\xeb", +- [0x07cf] = "\xa8\xec", [0x07d0] = "\xa8\xed", [0x07d1] = "\xa8\xee", +- [0x07d2] = "\xa8\xef", [0x07d3] = "\xa8\xf0", [0x07d4] = "\xa8\xf1", +- [0x07d5] = "\xa8\xf2", [0x07d6] = "\xa8\xf3", [0x07d7] = "\xa8\xf4", +- [0x07d8] = "\xa8\xf5", [0x07d9] = "\xa8\xf6", [0x07da] = "\xa8\xf7", +- [0x07db] = "\xa8\xf8", [0x07dc] = "\xa8\xf9", [0x07dd] = "\xa8\xfa", +- [0x07de] = "\xa8\xfb", 
[0x07df] = "\xa8\xfc", [0x07e0] = "\xa8\xfd", +- [0x07e1] = "\xa8\xfe", [0x07e2] = "\xa9\x58", [0x07e3] = "\xa9\x5b", +- [0x07e4] = "\xa9\x5d", [0x07e5] = "\xa9\x5e", [0x07e6] = "\xa9\x5f", +- [0x07e7] = "\x65\x9f", [0x07e8] = "\x65\xa0", [0x07e9] = "\x65\xa1", +- [0x07ea] = "\x65\xa2", [0x07eb] = "\x65\xa3", [0x07ec] = "\x65\xa4", +- [0x07ed] = "\x65\xa5", [0x07ee] = "\x65\xa6", [0x07ef] = "\x65\xa7", +- [0x07f0] = "\x65\xa8", [0x07f1] = "\x65\xa9", [0x07f2] = "\x65\xaa", +- [0x07f3] = "\x65\xab", [0x07f4] = "\xa9\x97", [0x07f5] = "\xa9\x98", +- [0x07f6] = "\xa9\x99", [0x07f7] = "\xa9\x9a", [0x07f8] = "\xa9\x9b", +- [0x07f9] = "\xa9\x9c", [0x07fa] = "\xa9\x9d", [0x07fb] = "\xa9\x9e", +- [0x07fc] = "\xa9\x9f", [0x07fd] = "\xa9\xa0", [0x07fe] = "\xa9\xa1", +- [0x07ff] = "\xa9\xa2", [0x0800] = "\xa9\xa3", [0x0801] = "\xa9\xf0", +- [0x0802] = "\xa9\xf1", [0x0803] = "\xa9\xf2", [0x0804] = "\xa9\xf3", +- [0x0805] = "\xa9\xf4", [0x0806] = "\xa9\xf5", [0x0807] = "\xa9\xf6", +- [0x0808] = "\xa9\xf7", [0x0809] = "\xa9\xf8", [0x080a] = "\xa9\xf9", +- [0x080b] = "\xa9\xfa", [0x080c] = "\xa9\xfb", [0x080d] = "\xa9\xfc", +- [0x080e] = "\xa9\xfd", [0x080f] = "\xa9\xfe", [0x0810] = "\xd7\xfa", +- [0x0811] = "\xd7\xfb", [0x0812] = "\xd7\xfc", [0x0813] = "\xd7\xfd", +- [0x0814] = "\xd7\xfe", [0x0815] = "\x65\xac", [0x0819] = "\x65\xad", +- [0x081a] = "\x65\xae", [0x081b] = "\x65\xaf", [0x081c] = "\x65\xb0", +- [0x081d] = "\x65\xb1", [0x081f] = "\x65\xb2", [0x0820] = "\x65\xb3", +- [0x0821] = "\x65\xb4", [0x0822] = "\x65\xb5", [0x0823] = "\x65\xb6", +- [0x0824] = "\x65\xb7", [0x0825] = "\x65\xb8", [0x0827] = "\x65\xb9", ++ [0x078c] = "\xa6\xc0", [0x078d] = "\x7b\x84", [0x078e] = "\x7b\x86", ++ [0x078f] = "\x7b\x85", [0x0790] = "\x7b\x87", [0x0791] = "\x7b\x88", ++ [0x0792] = "\x7b\x89", [0x0793] = "\x7b\x8a", [0x0794] = "\x7b\x8b", ++ [0x0795] = "\x7b\x8c", [0x0796] = "\x7b\x8d", [0x0797] = "\xa6\xf6", ++ [0x0798] = "\xa6\xf7", [0x0799] = "\xa6\xf8", [0x079a] = "\xa6\xf9", ++ [0x079b] = "\xa6\xfa", [0x079c] = "\xa6\xfb", [0x079d] = "\xa6\xfc", ++ [0x079e] = "\xa6\xfd", [0x079f] = "\xa6\xfe", [0x07a0] = "\xa7\xc2", ++ [0x07a1] = "\xa7\xc3", [0x07a2] = "\xa7\xc4", [0x07a3] = "\xa7\xc5", ++ [0x07a4] = "\xa7\xc6", [0x07a5] = "\xa7\xc7", [0x07a6] = "\xa7\xc8", ++ [0x07a7] = "\xa7\xc9", [0x07a8] = "\xa7\xca", [0x07a9] = "\xa7\xcb", ++ [0x07aa] = "\xa7\xcc", [0x07ab] = "\xa7\xcd", [0x07ac] = "\xa7\xce", ++ [0x07ad] = "\xa7\xcf", [0x07ae] = "\xa7\xd0", [0x07af] = "\xa7\xf2", ++ [0x07b0] = "\xa7\xf3", [0x07b1] = "\xa7\xf4", [0x07b2] = "\xa7\xf5", ++ [0x07b3] = "\xa7\xf6", [0x07b4] = "\xa7\xf7", [0x07b5] = "\xa7\xf8", ++ [0x07b6] = "\xa7\xf9", [0x07b7] = "\xa7\xfa", [0x07b8] = "\xa7\xfb", ++ [0x07b9] = "\xa7\xfc", [0x07ba] = "\xa7\xfd", [0x07bb] = "\xa7\xfe", ++ [0x07bc] = "\xa8\x96", [0x07bd] = "\xa8\x97", [0x07be] = "\xa8\x98", ++ [0x07bf] = "\xa8\x99", [0x07c0] = "\xa8\x9a", [0x07c1] = "\xa8\x9b", ++ [0x07c2] = "\xa8\x9c", [0x07c3] = "\xa8\x9d", [0x07c4] = "\xa8\x9e", ++ [0x07c5] = "\xa8\x9f", [0x07c6] = "\xa8\xa0", [0x07c7] = "\x00\x01", ++ [0x07c8] = "\x65\x9e", [0x07c9] = "\xa8\xc1", [0x07ca] = "\xa8\xc2", ++ [0x07cb] = "\xa8\xc3", [0x07cc] = "\xa8\xc4", [0x07cd] = "\xa8\xea", ++ [0x07ce] = "\xa8\xeb", [0x07cf] = "\xa8\xec", [0x07d0] = "\xa8\xed", ++ [0x07d1] = "\xa8\xee", [0x07d2] = "\xa8\xef", [0x07d3] = "\xa8\xf0", ++ [0x07d4] = "\xa8\xf1", [0x07d5] = "\xa8\xf2", [0x07d6] = "\xa8\xf3", ++ [0x07d7] = "\xa8\xf4", [0x07d8] = "\xa8\xf5", [0x07d9] = "\xa8\xf6", ++ [0x07da] = "\xa8\xf7", [0x07db] = "\xa8\xf8", 
[0x07dc] = "\xa8\xf9", ++ [0x07dd] = "\xa8\xfa", [0x07de] = "\xa8\xfb", [0x07df] = "\xa8\xfc", ++ [0x07e0] = "\xa8\xfd", [0x07e1] = "\xa8\xfe", [0x07e2] = "\xa9\x58", ++ [0x07e3] = "\xa9\x5b", [0x07e4] = "\xa9\x5d", [0x07e5] = "\xa9\x5e", ++ [0x07e6] = "\xa9\x5f", [0x07e7] = "\x65\x9f", [0x07e8] = "\x65\xa0", ++ [0x07e9] = "\x65\xa1", [0x07ea] = "\x65\xa2", [0x07eb] = "\x65\xa3", ++ [0x07ec] = "\x65\xa4", [0x07ed] = "\x65\xa5", [0x07ee] = "\x65\xa6", ++ [0x07ef] = "\x65\xa7", [0x07f0] = "\x65\xa8", [0x07f1] = "\x65\xa9", ++ [0x07f2] = "\x65\xaa", [0x07f3] = "\x65\xab", [0x07f4] = "\xa9\x97", ++ [0x07f5] = "\xa9\x98", [0x07f6] = "\xa9\x99", [0x07f7] = "\xa9\x9a", ++ [0x07f8] = "\xa9\x9b", [0x07f9] = "\xa9\x9c", [0x07fa] = "\xa9\x9d", ++ [0x07fb] = "\xa9\x9e", [0x07fc] = "\xa9\x9f", [0x07fd] = "\xa9\xa0", ++ [0x07fe] = "\xa9\xa1", [0x07ff] = "\xa9\xa2", [0x0800] = "\xa9\xa3", ++ [0x0801] = "\xa9\xf0", [0x0802] = "\xa9\xf1", [0x0803] = "\xa9\xf2", ++ [0x0804] = "\xa9\xf3", [0x0805] = "\xa9\xf4", [0x0806] = "\xa9\xf5", ++ [0x0807] = "\xa9\xf6", [0x0808] = "\xa9\xf7", [0x0809] = "\xa9\xf8", ++ [0x080a] = "\xa9\xf9", [0x080b] = "\xa9\xfa", [0x080c] = "\xa9\xfb", ++ [0x080d] = "\xa9\xfc", [0x080e] = "\xa9\xfd", [0x080f] = "\xa9\xfe", ++ [0x0810] = "\xd7\xfa", [0x0811] = "\xd7\xfb", [0x0812] = "\xd7\xfc", ++ [0x0813] = "\xd7\xfd", [0x0814] = "\xd7\xfe", [0x0815] = "\x65\xac", ++ [0x0816] = "\xfe\x51", [0x0817] = "\xfe\x52", [0x0818] = "\xfe\x53", ++ [0x0819] = "\x65\xad", [0x081a] = "\x65\xae", [0x081b] = "\x65\xaf", ++ [0x081c] = "\x65\xb0", [0x081d] = "\x65\xb1", [0x081e] = "\x2d\x51", ++ [0x081f] = "\x65\xb2", [0x0820] = "\x65\xb3", [0x0821] = "\x65\xb4", ++ [0x0822] = "\x65\xb5", [0x0823] = "\x65\xb6", [0x0824] = "\x65\xb7", ++ [0x0825] = "\x65\xb8", [0x0826] = "\x2d\x52", [0x0827] = "\x65\xb9", + [0x0828] = "\x65\xba", [0x0829] = "\x65\xbb", [0x082a] = "\x65\xbc", +- [0x082d] = "\x65\xbd", [0x082e] = "\x65\xbe", [0x082f] = "\x65\xbf", +- [0x0830] = "\x65\xc0", [0x0833] = "\x65\xc1", [0x0834] = "\x65\xc2", +- [0x0835] = "\x65\xc3", [0x0836] = "\x65\xc4", [0x0837] = "\x65\xc5", +- [0x0838] = "\x65\xc6", [0x0839] = "\x65\xc7", [0x083a] = "\x65\xc8", +- [0x083c] = "\x65\xc9", [0x083d] = "\x65\xca", [0x083e] = "\x65\xcb", +- [0x083f] = "\x65\xcc", [0x0840] = "\x65\xcd", [0x0841] = "\x65\xce", +- [0x0842] = "\x65\xcf", [0x0844] = "\x65\xd0", [0x0845] = "\x65\xd1", ++ [0x082b] = "\x2d\x53", [0x082c] = "\x2d\x54", [0x082d] = "\x65\xbd", ++ [0x082e] = "\x65\xbe", [0x082f] = "\x65\xbf", [0x0830] = "\x65\xc0", ++ [0x0831] = "\xfe\x6c", [0x0832] = "\x2d\x55", [0x0833] = "\x65\xc1", ++ [0x0834] = "\x65\xc2", [0x0835] = "\x65\xc3", [0x0836] = "\x65\xc4", ++ [0x0837] = "\x65\xc5", [0x0838] = "\x65\xc6", [0x0839] = "\x65\xc7", ++ [0x083a] = "\x65\xc8", [0x083b] = "\xfe\x76", [0x083c] = "\x65\xc9", ++ [0x083d] = "\x65\xca", [0x083e] = "\x65\xcb", [0x083f] = "\x65\xcc", ++ [0x0840] = "\x65\xcd", [0x0841] = "\x65\xce", [0x0842] = "\x65\xcf", ++ [0x0843] = "\x2d\x56", [0x0844] = "\x65\xd0", [0x0845] = "\x65\xd1", + [0x0846] = "\x65\xd2", [0x0847] = "\x65\xd3", [0x0848] = "\x65\xd4", + [0x0849] = "\x65\xd5", [0x084a] = "\x65\xd6", [0x084b] = "\x65\xd7", + [0x084c] = "\x65\xd8", [0x084d] = "\x65\xd9", [0x084e] = "\x65\xda", + [0x084f] = "\x65\xdb", [0x0850] = "\x65\xdc", [0x0851] = "\x65\xdd", +- [0x0852] = "\x65\xde", [0x0853] = "\x65\xdf", [0x0856] = "\x65\xe0", +- [0x0857] = "\x65\xe1", [0x0858] = "\x65\xe2", [0x0859] = "\x65\xe3", +- [0x085a] = "\x65\xe4", [0x085b] = "\x65\xe5", [0x085c] = "\x65\xe6", +- 
[0x085d] = "\x65\xe7", [0x085e] = "\x65\xe8", [0x085f] = "\x65\xe9", +- [0x0860] = "\x65\xea", [0x0861] = "\x65\xeb", [0x0862] = "\x65\xec", +- [0x0863] = "\x65\xed", [0x0865] = "\xfd\x9c", [0x0866] = "\x76\xb5", ++ [0x0852] = "\x65\xde", [0x0853] = "\x65\xdf", [0x0854] = "\x2d\x57", ++ [0x0855] = "\xfe\x91", [0x0856] = "\x65\xe0", [0x0857] = "\x65\xe1", ++ [0x0858] = "\x65\xe2", [0x0859] = "\x65\xe3", [0x085a] = "\x65\xe4", ++ [0x085b] = "\x65\xe5", [0x085c] = "\x65\xe6", [0x085d] = "\x65\xe7", ++ [0x085e] = "\x65\xe8", [0x085f] = "\x65\xe9", [0x0860] = "\x65\xea", ++ [0x0861] = "\x65\xeb", [0x0862] = "\x65\xec", [0x0863] = "\x65\xed", ++ [0x0864] = "\x2d\x58", [0x0865] = "\xfd\x9c", [0x0866] = "\x76\xb5", + [0x0867] = "\x76\xb6", [0x0868] = "\x76\xb7", [0x0869] = "\x76\xb8", + [0x086a] = "\x76\xb9", [0x086b] = "\x76\xba", [0x086c] = "\x76\xbb", + [0x086d] = "\x76\xbc", [0x086e] = "\x76\xbd", [0x086f] = "\x76\xbe", +@@ -24211,24 +24224,8 @@ static const unsigned char __ucs_to_gb18030_tab2[][2] = + || (ch = __twobyte_to_ucs[idx], \ + ch == 0 && *inptr != '\0')) \ + { \ +- /* Handle a few special cases. */ \ +- if (idx == 0x5dd1) \ +- ch = 0x20087; \ +- else if (idx == 0x5dd2) \ +- ch = 0x20089; \ +- else if (idx == 0x5dd3) \ +- ch = 0x200cc; \ +- else if (idx == 0x5dec) \ +- ch = 0x215D7; \ +- else if (idx == 0x5df6) \ +- ch = 0x2298F; \ +- else if (idx == 0x5e11) \ +- ch = 0x241FE; \ +- else \ +- { \ +- /* This is an illegal character. */ \ +- STANDARD_FROM_LOOP_ERR_HANDLER (2); \ +- } \ ++ /* This is an illegal character. */ \ ++ STANDARD_FROM_LOOP_ERR_HANDLER (2); \ + } \ + \ + inptr += 2; \ +@@ -24320,17 +24317,35 @@ static const unsigned char __ucs_to_gb18030_tab2[][2] = + len = 4; \ + } \ + else if (ch == 0x20087) \ +- cp = (const unsigned char *) "\xfe\x51"; \ ++ { \ ++ idx = 0x3E2CF; \ ++ len = 4; \ ++ } \ + else if (ch == 0x20089) \ +- cp = (const unsigned char *) "\xfe\x52"; \ ++ { \ ++ idx = 0x3E2D1; \ ++ len = 4; \ ++ } \ + else if (ch == 0x200CC) \ +- cp = (const unsigned char *) "\xfe\x53"; \ ++ { \ ++ idx = 0x3E314; \ ++ len = 4; \ ++ } \ + else if (ch == 0x215d7) \ +- cp = (const unsigned char *) "\xfe\x6c"; \ ++ { \ ++ idx = 0x3F81F; \ ++ len = 4; \ ++ } \ + else if (ch == 0x2298F) \ +- cp = (const unsigned char *) "\xfe\x76"; \ ++ { \ ++ idx = 0x40BD7; \ ++ len = 4; \ ++ } \ + else if (ch == 0x241FE) \ +- cp = (const unsigned char *) "\xfe\x91"; \ ++ { \ ++ idx = 0x42446; \ ++ len = 4; \ ++ } \ + else if (ch >= 0x10000 && ch <= 0x10FFFF) \ + { \ + idx = ch + 0x1E248; \ +diff --git a/iconvdata/tst-table-from.c b/iconvdata/tst-table-from.c +index 09aaaf0942..55a7113d8c 100644 +--- a/iconvdata/tst-table-from.c ++++ b/iconvdata/tst-table-from.c +@@ -194,10 +194,9 @@ main (int argc, char *argv[]) + exit (1); + } + +- /* When testing UTF-8 or GB18030, stop at 0x10000, otherwise the output ++ /* When testing UTF-8, stop at 0x10000, otherwise the output + file gets too big. */ +- bmp_only = (strcmp (charset, "UTF-8") == 0 +- || strcmp (charset, "GB18030") == 0); ++ bmp_only = (strcmp (charset, "UTF-8") == 0); + search_depth = (strcmp (charset, "UTF-8") == 0 ? 
3 : 4); + + { +diff --git a/iconvdata/tst-table-to.c b/iconvdata/tst-table-to.c +index 4dec4acad1..2b75f0c6e8 100644 +--- a/iconvdata/tst-table-to.c ++++ b/iconvdata/tst-table-to.c +@@ -32,6 +32,7 @@ main (int argc, char *argv[]) + const char *charset; + iconv_t cd; + int bmp_only; ++ int no_tags; + + if (argc != 2) + { +@@ -47,16 +48,19 @@ main (int argc, char *argv[]) + return 1; + } + +- /* When testing UTF-8 or GB18030, stop at 0x10000, otherwise the output ++ /* When testing UTF-8, stop at 0x10000, otherwise the output + file gets too big. */ +- bmp_only = (strcmp (charset, "UTF-8") == 0 ++ bmp_only = (strcmp (charset, "UTF-8") == 0); ++ /* When testing any encoding other than UTF-8 or GB18030, stop at 0xE0000, ++ because the conversion drops Unicode tag characters (range ++ U+E0000..U+E007F). */ ++ no_tags = !(strcmp (charset, "UTF-8") == 0 + || strcmp (charset, "GB18030") == 0); + + { + unsigned int i; + unsigned char buf[10]; +- +- for (i = 0; i < (bmp_only ? 0x10000 : 0x30000); i++) ++ for (i = 0; i < (bmp_only ? 0x10000 : no_tags ? 0xE0000 : 0x110000); i++) + { + unsigned char in[6]; + unsigned int incount = +diff --git a/iconvdata/tst-table.sh b/iconvdata/tst-table.sh +index bc6f542b24..7ba15bbf5c 100755 +--- a/iconvdata/tst-table.sh ++++ b/iconvdata/tst-table.sh +@@ -37,7 +37,8 @@ set -e + < ../localedata/charmaps/${charmap:-$charset} \ + > ${objpfx}tst-${charset}.charmap.table + # When the charset is GB18030, truncate this table because for this encoding, +-# the tst-table-from and tst-table-to programs scan the Unicode BMP only. ++# the charmap contains ranges (<Unnnn>..<Ummmm> notation), which the ++# tst-table-charmap.sh script does not grok. + if test ${charset} = GB18030; then + grep '0x....$' < ${objpfx}tst-${charset}.charmap.table \ + > ${objpfx}tst-${charset}.truncated.table +@@ -73,25 +74,42 @@ diff ${objpfx}tst-${charset}.charmap.table ${objpfx}tst-${charset}.inverse.table + + # Check 1: charmap and iconv forward should be identical, except for + # precomposed characters. +-if test -f ${precomposed}; then +- cat ${objpfx}tst-${charset}.table ${precomposed} | sort | uniq -u \ +- > ${objpfx}tst-${charset}.tmp.table +- cmp -s ${objpfx}tst-${charset}.charmap.table ${objpfx}tst-${charset}.tmp.table || ++{ if test -f ${precomposed}; then ++ cat ${objpfx}tst-${charset}.table ${precomposed} | sort | uniq -u ++ else ++ cat ${objpfx}tst-${charset}.table ++ fi ++} | { if test ${charset} = GB18030; then grep '0x....$'; else cat; fi; } \ ++ > ${objpfx}tst-${charset}.tmp1.table ++cmp -s ${objpfx}tst-${charset}.charmap.table ${objpfx}tst-${charset}.tmp1.table || + exit 1 +-else +- cmp -s ${objpfx}tst-${charset}.charmap.table ${objpfx}tst-${charset}.table || +- exit 1 +-fi + + # Check 2: the difference between the charmap and iconv backward. 
+-if test -f ${irreversible}; then +- cat ${objpfx}tst-${charset}.charmap.table ${irreversible} | sort | uniq -u \ +- > ${objpfx}tst-${charset}.tmp.table +- cmp -s ${objpfx}tst-${charset}.tmp.table ${objpfx}tst-${charset}.inverse.table || +- exit 1 +-else +- cmp -s ${objpfx}tst-${charset}.charmap.table ${objpfx}tst-${charset}.inverse.table || ++{ if test -f ${irreversible}; then ++ cat ${objpfx}tst-${charset}.charmap.table ${irreversible} | sort | uniq -u ++ else ++ cat ${objpfx}tst-${charset}.charmap.table ++ fi ++} | { if test ${charset} = GB18030; then grep '0x....$'; else cat; fi; } \ ++ > ${objpfx}tst-${charset}.tmp2c.table ++cat ${objpfx}tst-${charset}.inverse.table \ ++ | { if test ${charset} = GB18030; then grep '0x....$'; else cat; fi; } \ ++ > ${objpfx}tst-${charset}.tmp2i.table ++cmp -s ${objpfx}tst-${charset}.tmp2c.table ${objpfx}tst-${charset}.tmp2i.table || + exit 1 ++ ++# Check 3: the difference between iconv forward and iconv backward. This is ++# necessary only for GB18030, because ${objpfx}tst-${charset}.charmap.table ++# is truncated for this encoding (see above). ++if test ${charset} = GB18030; then ++ { if test -f ${irreversible}; then ++ cat ${objpfx}tst-${charset}.table ${irreversible} | sort | uniq -u ++ else ++ cat ${objpfx}tst-${charset}.table ++ fi ++ } > ${objpfx}tst-${charset}.tmp3.table ++ cmp -s ${objpfx}tst-${charset}.tmp3.table ${objpfx}tst-${charset}.inverse.table || ++ exit 1 + fi + + exit 0 +diff --git a/localedata/charmaps/GB18030 b/localedata/charmaps/GB18030 +index ad6728c5bd..fc3b1d2d40 100644 +--- a/localedata/charmaps/GB18030 ++++ b/localedata/charmaps/GB18030 +@@ -57234,32 +57234,16 @@ CHARMAP + <UE78A> /xa6/xbe <Private Use> + <UE78B> /xa6/xbf <Private Use> + <UE78C> /xa6/xc0 <Private Use> +-% The newest GB 18030-2005 standard still uses some private use area +-% code points. Any implementation which has Unicode 4.1 or newer +-% support should not use these PUA code points, and instead should +-% map these entries to their equivalent non-PUA code points. There +-% are 24 idiograms in GB 18030-2005 which have non-PUA equivalents. +-% In glibc we only support roundtrip code points, and so must choose +-% between supporting the old PUA code points, or using the newer +-% non-PUA code points. We choose to use the non-PUA code points to +-% be compatible with ICU's similar choice. In choosing the non-PUA +-% code points we can no longer convert the old PUA code points back +-% to GB-18030-2005 (technically only fixable if we added support +-% for non-roundtrip code points e.g. ICU's "fallback mapping"). +-% The recommendation to use the non-PUA code points, where available, +-% is based on "CJKV Information Processing" 2nd Ed. by Dr. Ken Lunde. +-% +-% These 10 PUA mappings use equivalents from <UFE10> to <UFE19>. 
+-% <UE78D> /xa6/xd9 <Private Use> +-% <UE78E> /xa6/xda <Private Use> +-% <UE78F> /xa6/xdb <Private Use> +-% <UE790> /xa6/xdc <Private Use> +-% <UE791> /xa6/xdd <Private Use> +-% <UE792> /xa6/xde <Private Use> +-% <UE793> /xa6/xdf <Private Use> +-% <UE794> /xa6/xec <Private Use> +-% <UE795> /xa6/xed <Private Use> +-% <UE796> /xa6/xf3 <Private Use> ++<UE78D> /x84/x31/x82/x36 <Private Use> ++<UE78E> /x84/x31/x82/x38 <Private Use> ++<UE78F> /x84/x31/x82/x37 <Private Use> ++<UE790> /x84/x31/x82/x39 <Private Use> ++<UE791> /x84/x31/x83/x30 <Private Use> ++<UE792> /x84/x31/x83/x31 <Private Use> ++<UE793> /x84/x31/x83/x32 <Private Use> ++<UE794> /x84/x31/x83/x33 <Private Use> ++<UE795> /x84/x31/x83/x34 <Private Use> ++<UE796> /x84/x31/x83/x35 <Private Use> + <UE797> /xa6/xf6 <Private Use> + <UE798> /xa6/xf7 <Private Use> + <UE799> /xa6/xf8 <Private Use> +@@ -57387,17 +57371,15 @@ CHARMAP + <UE813> /xd7/xfd <Private Use> + <UE814> /xd7/xfe <Private Use> + <UE815> /x83/x36/xc9/x34 <Private Use> +-% These 3 PUA mappings use equivalents <U20087>, <U20089> and <U200CC>. +-% <UE816> /xfe/x51 <Private Use> +-% <UE817> /xfe/x52 <Private Use> +-% <UE818> /xfe/x53 <Private Use> ++<UE816> /xfe/x51 <Private Use> ++<UE817> /xfe/x52 <Private Use> ++<UE818> /xfe/x53 <Private Use> + <UE819> /x83/x36/xc9/x35 <Private Use> + <UE81A> /x83/x36/xc9/x36 <Private Use> + <UE81B> /x83/x36/xc9/x37 <Private Use> + <UE81C> /x83/x36/xc9/x38 <Private Use> + <UE81D> /x83/x36/xc9/x39 <Private Use> +-% This 1 PUA mapping uses the equivalent <U9FB4>. +-% <UE81E> /xfe/x59 <Private Use> ++<UE81E> /x82/x35/x90/x37 <Private Use> + <UE81F> /x83/x36/xca/x30 <Private Use> + <UE820> /x83/x36/xca/x31 <Private Use> + <UE821> /x83/x36/xca/x32 <Private Use> +@@ -57405,22 +57387,19 @@ CHARMAP + <UE823> /x83/x36/xca/x34 <Private Use> + <UE824> /x83/x36/xca/x35 <Private Use> + <UE825> /x83/x36/xca/x36 <Private Use> +-% This 1 PUA mapping uses the equivalent <U9FB5>. +-% <UE826> /xfe/x61 <Private Use> ++<UE826> /x82/x35/x90/x38 <Private Use> + <UE827> /x83/x36/xca/x37 <Private Use> + <UE828> /x83/x36/xca/x38 <Private Use> + <UE829> /x83/x36/xca/x39 <Private Use> + <UE82A> /x83/x36/xcb/x30 <Private Use> +-% These 2 PUA mappings use the equivalents <U9FB6> and <U9FB7>. +-% <UE82B> /xfe/x66 <Private Use> +-% <UE82C> /xfe/x67 <Private Use> ++<UE82B> /x82/x35/x90/x39 <Private Use> ++<UE82C> /x82/x35/x91/x30 <Private Use> + <UE82D> /x83/x36/xcb/x31 <Private Use> + <UE82E> /x83/x36/xcb/x32 <Private Use> + <UE82F> /x83/x36/xcb/x33 <Private Use> + <UE830> /x83/x36/xcb/x34 <Private Use> +-% These 2 PUA mappings use the equivalents <U215D7> and <U9FB8>. +-% <UE831> /xfe/x6c <Private Use> +-% <UE832> /xfe/x6d <Private Use> ++<UE831> /xfe/x6c <Private Use> ++<UE832> /x82/x35/x91/x31 <Private Use> + <UE833> /x83/x36/xcb/x35 <Private Use> + <UE834> /x83/x36/xcb/x36 <Private Use> + <UE835> /x83/x36/xcb/x37 <Private Use> +@@ -57429,8 +57408,7 @@ CHARMAP + <UE838> /x83/x36/xcc/x30 <Private Use> + <UE839> /x83/x36/xcc/x31 <Private Use> + <UE83A> /x83/x36/xcc/x32 <Private Use> +-% This 1 PUA mapping uses the equivalent <U2298F>. +-% <UE83B> /xfe/x76 <Private Use> ++<UE83B> /xfe/x76 <Private Use> + <UE83C> /x83/x36/xcc/x33 <Private Use> + <UE83D> /x83/x36/xcc/x34 <Private Use> + <UE83E> /x83/x36/xcc/x35 <Private Use> +@@ -57438,8 +57416,7 @@ CHARMAP + <UE840> /x83/x36/xcc/x37 <Private Use> + <UE841> /x83/x36/xcc/x38 <Private Use> + <UE842> /x83/x36/xcc/x39 <Private Use> +-% This 1 PUA mapping uses the equivalent <U9FB9>. 
+-% <UE843> /xfe/x7e <Private Use> ++<UE843> /x82/x35/x91/x32 <Private Use> + <UE844> /x83/x36/xcd/x30 <Private Use> + <UE845> /x83/x36/xcd/x31 <Private Use> + <UE846> /x83/x36/xcd/x32 <Private Use> +@@ -57456,9 +57433,8 @@ CHARMAP + <UE851> /x83/x36/xce/x33 <Private Use> + <UE852> /x83/x36/xce/x34 <Private Use> + <UE853> /x83/x36/xce/x35 <Private Use> +-% These 2 PUA mappings use the equivalents <U9FBA> and <U241FE>. +-% <UE854> /xfe/x90 <Private Use> +-% <UE855> /xfe/x91 <Private Use> ++<UE854> /x82/x35/x91/x33 <Private Use> ++<UE855> /xfe/x91 <Private Use> + <UE856> /x83/x36/xce/x36 <Private Use> + <UE857> /x83/x36/xce/x37 <Private Use> + <UE858> /x83/x36/xce/x38 <Private Use> +@@ -57473,8 +57449,7 @@ CHARMAP + <UE861> /x83/x36/xcf/x37 <Private Use> + <UE862> /x83/x36/xcf/x38 <Private Use> + <UE863> /x83/x36/xcf/x39 <Private Use> +-% This 1 PUA mapping uses the equivalent <U9FBB>. +-% <UE864> /xfe/xa0 <Private Use> ++<UE864> /x82/x35/x91/x34 <Private Use> + <UE865> /x83/x36/xd0/x30 <Private Use> + <UE866> /x83/x36/xd0/x31 <Private Use> + <UE867> /x83/x36/xd0/x32 <Private Use> +@@ -70447,19 +70422,14 @@ CHARMAP + <U00020068>..<U00020071> /x95/x32/x8d/x30 <CJK> + <U00020072>..<U0002007B> /x95/x32/x8e/x30 <CJK> + <U0002007C>..<U00020085> /x95/x32/x8f/x30 <CJK> +-<U00020086> /x95/x32/x90/x30 <CJK> +-<U00020087> /xfe/x51 <CJK> +-<U00020088> /x95/x32/x90/x32 <CJK> +-<U00020089> /xfe/x52 <CJK> +-<U0002008A>..<U0002008F> /x95/x32/x90/x34 <CJK> ++<U00020086>..<U0002008F> /x95/x32/x90/x30 <CJK> + <U00020090>..<U00020099> /x95/x32/x91/x30 <CJK> + <U0002009A>..<U000200A3> /x95/x32/x92/x30 <CJK> + <U000200A4>..<U000200AD> /x95/x32/x93/x30 <CJK> + <U000200AE>..<U000200B7> /x95/x32/x94/x30 <CJK> + <U000200B8>..<U000200C1> /x95/x32/x95/x30 <CJK> + <U000200C2>..<U000200CB> /x95/x32/x96/x30 <CJK> +-<U000200CC> /xfe/x53 <CJK> +-<U000200CD>..<U000200D5> /x95/x32/x97/x31 <CJK> ++<U000200CC>..<U000200D5> /x95/x32/x97/x30 <CJK> + <U000200D6>..<U000200DF> /x95/x32/x98/x30 <CJK> + <U000200E0>..<U000200E9> /x95/x32/x99/x30 <CJK> + <U000200EA>..<U000200F3> /x95/x32/x9a/x30 <CJK> +@@ -70998,8 +70968,7 @@ CHARMAP + <U000215BC>..<U000215C5> /x95/x36/xb7/x30 <CJK> + <U000215C6>..<U000215CF> /x95/x36/xb8/x30 <CJK> + <U000215D0>..<U000215D6> /x95/x36/xb9/x30 <CJK> +-<U000215D7> /xfe/x6c <CJK> +-<U000215D8>..<U000215D9> /x95/x36/xb9/x38 <CJK> ++<U000215D7>..<U000215D9> /x95/x36/xb9/x37 <CJK> + <U000215DA>..<U000215E3> /x95/x36/xba/x30 <CJK> + <U000215E4>..<U000215ED> /x95/x36/xbb/x30 <CJK> + <U000215EE>..<U000215F7> /x95/x36/xbc/x30 <CJK> +@@ -71505,8 +71474,7 @@ CHARMAP + <U00022976>..<U0002297F> /x96/x30/xb8/x30 <CJK> + <U00022980>..<U00022989> /x96/x30/xb9/x30 <CJK> + <U0002298A>..<U0002298E> /x96/x30/xba/x30 <CJK> +-<U0002298F> /xfe/x76 <CJK> +-<U00022990>..<U00022993> /x96/x30/xba/x36 <CJK> ++<U0002298F>..<U00022993> /x96/x30/xba/x35 <CJK> + <U00022994>..<U0002299D> /x96/x30/xbb/x30 <CJK> + <U0002299E>..<U000229A7> /x96/x30/xbc/x30 <CJK> + <U000229A8>..<U000229B1> /x96/x30/xbd/x30 <CJK> +@@ -72132,8 +72100,7 @@ CHARMAP + <U000241E0>..<U000241E9> /x96/x35/xb3/x30 <CJK> + <U000241EA>..<U000241F3> /x96/x35/xb4/x30 <CJK> + <U000241F4>..<U000241FD> /x96/x35/xb5/x30 <CJK> +-<U000241FE> /xfe/x91 <CJK> +-<U000241FF>..<U00024207> /x96/x35/xb6/x31 <CJK> ++<U000241FE>..<U00024207> /x96/x35/xb6/x30 <CJK> + <U00024208>..<U00024211> /x96/x35/xb7/x30 <CJK> + <U00024212>..<U0002421B> /x96/x35/xb8/x30 <CJK> + <U0002421C>..<U00024225> /x96/x35/xb9/x30 <CJK> diff --git a/SOURCES/glibc-RHEL-78939-1.patch 
b/SOURCES/glibc-RHEL-78939-1.patch new file mode 100644 index 0000000000000000000000000000000000000000..932bd01624c529524ce322cf5594a6d791c20f07 --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-1.patch @@ -0,0 +1,447 @@ +commit 1db84775f831a1494993ce9c118deaf9537cc50a +Author: Frank Barrus <frankbarrus_sw@shaggy.cc> +Date: Wed Dec 4 07:55:02 2024 -0500 + + pthreads NPTL: lost wakeup fix 2 + + This fixes the lost wakeup (from a bug in signal stealing) with a change + in the usage of g_signals[] in the condition variable internal state. + It also completely eliminates the concept and handling of signal stealing, + as well as the need for signalers to block to wait for waiters to wake + up every time there is a G1/G2 switch. This greatly reduces the average + and maximum latency for pthread_cond_signal. + + The g_signals[] field now contains a signal count that is relative to + the current g1_start value. Since it is a 32-bit field, and the LSB is + still reserved (though not currently used anymore), it has a 31-bit value + that corresponds to the low 31 bits of the sequence number in g1_start. + (since g1_start also has an LSB flag, this means bits 31:1 in g_signals + correspond to bits 31:1 in g1_start, plus the current signal count) + + By making the signal count relative to g1_start, there is no longer + any ambiguity or A/B/A issue, and thus any checks before blocking, + including the futex call itself, are guaranteed not to block if the G1/G2 + switch occurs, even if the signal count remains the same. This allows + initially safely blocking in G2 until the switch to G1 occurs, and + then transitioning from G1 to a new G1 or G2, and always being able to + distinguish the state change. This removes the race condition and A/B/A + problems that otherwise ocurred if a late (pre-empted) waiter were to + resume just as the futex call attempted to block on g_signal since + otherwise there was no last opportunity to re-check things like whether + the current G1 group was already closed. + + By fixing these issues, the signal stealing code can be eliminated, + since there is no concept of signal stealing anymore. The code to block + for all waiters to exit g_refs can also be removed, since any waiters + that are still in the g_refs region can be guaranteed to safely wake + up and exit. If there are still any left at this time, they are all + sent one final futex wakeup to ensure that they are not blocked any + longer, but there is no need for the signaller to block and wait for + them to wake up and exit the g_refs region. + + The signal count is then effectively "zeroed" but since it is now + relative to g1_start, this is done by advancing it to a new value that + can be observed by any pending blocking waiters. Any late waiters can + always tell the difference, and can thus just cleanly exit if they are + in a stale G1 or G2. They can never steal a signal from the current + G1 if they are not in the current G1, since the signal value that has + to match in the cmpxchg has the low 31 bits of the g1_start value + contained in it, and that's first checked, and then it won't match if + there's a G1/G2 change. 
+ + Note: the 31-bit sequence number used in g_signals is designed to + handle wrap-around when checking the signal count, but if the entire + 31-bit wraparound (2 billion signals) occurs while there is still a + late waiter that has not yet resumed, and it happens to then match + the current g1_start low bits, and the pre-emption occurs after the + normal "closed group" checks (which are 64-bit) but then hits the + futex syscall and signal consuming code, then an A/B/A issue could + still result and cause an incorrect assumption about whether it + should block. This particular scenario seems unlikely in practice. + Note that once awake from the futex, the waiter would notice the + closed group before consuming the signal (since that's still a 64-bit + check that would not be aliased in the wrap-around in g_signals), + so the biggest impact would be blocking on the futex until the next + full wakeup from a G1/G2 switch. + + Signed-off-by: Frank Barrus <frankbarrus_sw@shaggy.cc> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +# Conflicts: +# nptl/pthread_cond_common.c (Missing spelling fixes) +# nptl/pthread_cond_wait.c (Likewise) + +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index c35b9ef03afd2c64..b1565b780d175d3a 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -341,7 +341,6 @@ static bool __attribute__ ((unused)) + __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + unsigned int *g1index, int private) + { +- const unsigned int maxspin = 0; + unsigned int g1 = *g1index; + + /* If there is no waiter in G2, we don't do anything. The expression may +@@ -362,84 +361,46 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + * New waiters arriving concurrently with the group switching will all go + into G2 until we atomically make the switch. Waiters existing in G2 + are not affected. +- * Waiters in G1 will be closed out immediately by setting a flag in +- __g_signals, which will prevent waiters from blocking using a futex on +- __g_signals and also notifies them that the group is closed. As a +- result, they will eventually remove their group reference, allowing us +- to close switch group roles. */ +- +- /* First, set the closed flag on __g_signals. This tells waiters that are +- about to wait that they shouldn't do that anymore. This basically +- serves as an advance notificaton of the upcoming change to __g1_start; +- waiters interpret it as if __g1_start was larger than their waiter +- sequence position. This allows us to change __g1_start after waiting +- for all existing waiters with group references to leave, which in turn +- makes recovery after stealing a signal simpler because it then can be +- skipped if __g1_start indicates that the group is closed (otherwise, +- we would have to recover always because waiters don't know how big their +- groups are). Relaxed MO is fine. */ +- atomic_fetch_or_relaxed (cond->__data.__g_signals + g1, 1); +- +- /* Wait until there are no group references anymore. The fetch-or operation +- injects us into the modification order of __g_refs; release MO ensures +- that waiters incrementing __g_refs after our fetch-or see the previous +- changes to __g_signals and to __g1_start that had to happen before we can +- switch this G1 and alias with an older group (we have two groups, so +- aliasing requires switching group roles twice). Note that nobody else +- can have set the wake-request flag, so we do not have to act upon it. 
+- +- Also note that it is harmless if older waiters or waiters from this G1 +- get a group reference after we have quiesced the group because it will +- remain closed for them either because of the closed flag in __g_signals +- or the later update to __g1_start. New waiters will never arrive here +- but instead continue to go into the still current G2. */ +- unsigned r = atomic_fetch_or_release (cond->__data.__g_refs + g1, 0); +- while ((r >> 1) > 0) +- { +- for (unsigned int spin = maxspin; ((r >> 1) > 0) && (spin > 0); spin--) +- { +- /* TODO Back off. */ +- r = atomic_load_relaxed (cond->__data.__g_refs + g1); +- } +- if ((r >> 1) > 0) +- { +- /* There is still a waiter after spinning. Set the wake-request +- flag and block. Relaxed MO is fine because this is just about +- this futex word. +- +- Update r to include the set wake-request flag so that the upcoming +- futex_wait only blocks if the flag is still set (otherwise, we'd +- violate the basic client-side futex protocol). */ +- r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1) | 1; +- +- if ((r >> 1) > 0) +- futex_wait_simple (cond->__data.__g_refs + g1, r, private); +- /* Reload here so we eventually see the most recent value even if we +- do not spin. */ +- r = atomic_load_relaxed (cond->__data.__g_refs + g1); +- } +- } +- /* Acquire MO so that we synchronize with the release operation that waiters +- use to decrement __g_refs and thus happen after the waiters we waited +- for. */ +- atomic_thread_fence_acquire (); ++ * Waiters in G1 will be closed out immediately by the advancing of ++ __g_signals to the next "lowseq" (low 31 bits of the new g1_start), ++ which will prevent waiters from blocking using a futex on ++ __g_signals since it provides enough signals for all possible ++ remaining waiters. As a result, they can each consume a signal ++ and they will eventually remove their group reference. */ + + /* Update __g1_start, which finishes closing this group. The value we add + will never be negative because old_orig_size can only be zero when we + switch groups the first time after a condvar was initialized, in which +- case G1 will be at index 1 and we will add a value of 1. See above for +- why this takes place after waiting for quiescence of the group. ++ case G1 will be at index 1 and we will add a value of 1. + Relaxed MO is fine because the change comes with no additional + constraints that others would have to observe. */ + __condvar_add_g1_start_relaxed (cond, + (old_orig_size << 1) + (g1 == 1 ? 1 : - 1)); + +- /* Now reopen the group, thus enabling waiters to again block using the +- futex controlled by __g_signals. Release MO so that observers that see +- no signals (and thus can block) also see the write __g1_start and thus +- that this is now a new group (see __pthread_cond_wait_common for the +- matching acquire MO loads). */ +- atomic_store_release (cond->__data.__g_signals + g1, 0); ++ unsigned int lowseq = ((old_g1_start + old_orig_size) << 1) & ~1U; ++ ++ /* If any waiters still hold group references (and thus could be blocked), ++ then wake them all up now and prevent any running ones from blocking. ++ This is effectively a catch-all for any possible current or future ++ bugs that can allow the group size to reach 0 before all G1 waiters ++ have been awakened or at least given signals to consume, or any ++ other case that can leave blocked (or about to block) older waiters.. 
*/ ++ if ((atomic_fetch_or_release (cond->__data.__g_refs + g1, 0) >> 1) > 0) ++ { ++ /* First advance signals to the end of the group (i.e. enough signals ++ for the entire G1 group) to ensure that waiters which have not ++ yet blocked in the futex will not block. ++ Note that in the vast majority of cases, this should never ++ actually be necessary, since __g_signals will have enough ++ signals for the remaining g_refs waiters. As an optimization, ++ we could check this first before proceeding, although that ++ could still leave the potential for futex lost wakeup bugs ++ if the signal count was non-zero but the futex wakeup ++ was somehow lost. */ ++ atomic_store_release (cond->__data.__g_signals + g1, lowseq); ++ ++ futex_wake (cond->__data.__g_signals + g1, INT_MAX, private); ++ } + + /* At this point, the old G1 is now a valid new G2 (but not in use yet). + No old waiter can neither grab a signal nor acquire a reference without +@@ -451,6 +412,10 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + g1 ^= 1; + *g1index ^= 1; + ++ /* Now advance the new G1 g_signals to the new lowseq, giving it ++ an effective signal count of 0 to start. */ ++ atomic_store_release (cond->__data.__g_signals + g1, lowseq); ++ + /* These values are just observed by signalers, and thus protected by the + lock. */ + unsigned int orig_size = wseq - (old_g1_start + old_orig_size); +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index dc8c511f1a72517a..c34280c6bc9e80fb 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -239,9 +239,7 @@ __condvar_cleanup_waiting (void *arg) + signaled), and a reference count. + + The group reference count is used to maintain the number of waiters that +- are using the group's futex. Before a group can change its role, the +- reference count must show that no waiters are using the futex anymore; this +- prevents ABA issues on the futex word. ++ are using the group's futex. + + To represent which intervals in the waiter sequence the groups cover (and + thus also which group slot contains G1 or G2), we use a 64b counter to +@@ -301,11 +299,12 @@ __condvar_cleanup_waiting (void *arg) + last reference. + * Reference count used by waiters concurrently with signalers that have + acquired the condvar-internal lock. +- __g_signals: The number of signals that can still be consumed. ++ __g_signals: The number of signals that can still be consumed, relative to ++ the current g1_start. (i.e. bits 31 to 1 of __g_signals are bits ++ 31 to 1 of g1_start with the signal count added) + * Used as a futex word by waiters. Used concurrently by waiters and + signalers. +- * LSB is true iff this group has been completely signaled (i.e., it is +- closed). ++ * LSB is currently reserved and 0. + __g_size: Waiters remaining in this group (i.e., which have not been + signaled yet. + * Accessed by signalers and waiters that cancel waiting (both do so only +@@ -329,18 +328,6 @@ __condvar_cleanup_waiting (void *arg) + sufficient because if a waiter can see a sufficiently large value, it could + have also consume a signal in the waiters group. + +- Waiters try to grab a signal from __g_signals without holding a reference +- count, which can lead to stealing a signal from a more recent group after +- their own group was already closed. 
They cannot always detect whether they +- in fact did because they do not know when they stole, but they can +- conservatively add a signal back to the group they stole from; if they +- did so unnecessarily, all that happens is a spurious wake-up. To make this +- even less likely, __g1_start contains the index of the current g2 too, +- which allows waiters to check if there aliasing on the group slots; if +- there wasn't, they didn't steal from the current G1, which means that the +- G1 they stole from must have been already closed and they do not need to +- fix anything. +- + It is essential that the last field in pthread_cond_t is __g_signals[1]: + The previous condvar used a pointer-sized field in pthread_cond_t, so a + PTHREAD_COND_INITIALIZER from that condvar implementation might only +@@ -436,6 +423,9 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + { + while (1) + { ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); ++ unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; ++ + /* Spin-wait first. + Note that spinning first without checking whether a timeout + passed might lead to what looks like a spurious wake-up even +@@ -447,35 +437,45 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + having to compare against the current time seems to be the right + choice from a performance perspective for most use cases. */ + unsigned int spin = maxspin; +- while (signals == 0 && spin > 0) ++ while (spin > 0 && ((int)(signals - lowseq) < 2)) + { + /* Check that we are not spinning on a group that's already + closed. */ +- if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)) +- goto done; ++ if (seq < (g1_start >> 1)) ++ break; + + /* TODO Back off. */ + + /* Reload signals. See above for MO. */ + signals = atomic_load_acquire (cond->__data.__g_signals + g); ++ g1_start = __condvar_load_g1_start_relaxed (cond); ++ lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + spin--; + } + +- /* If our group will be closed as indicated by the flag on signals, +- don't bother grabbing a signal. */ +- if (signals & 1) +- goto done; +- +- /* If there is an available signal, don't block. */ +- if (signals != 0) ++ if (seq < (g1_start >> 1)) ++ { ++ /* If the group is closed already, ++ then this waiter originally had enough extra signals to ++ consume, up until the time its group was closed. */ ++ goto done; ++ } ++ ++ /* If there is an available signal, don't block. ++ If __g1_start has advanced at all, then we must be in G1 ++ by now, perhaps in the process of switching back to an older ++ G2, but in either case we're allowed to consume the available ++ signal and should not block anymore. */ ++ if ((int)(signals - lowseq) >= 2) + break; + + /* No signals available after spinning, so prepare to block. + We first acquire a group reference and use acquire MO for that so + that we synchronize with the dummy read-modify-write in + __condvar_quiesce_and_switch_g1 if we read from that. In turn, +- in this case this will make us see the closed flag on __g_signals +- that designates a concurrent attempt to reuse the group's slot. ++ in this case this will make us see the advancement of __g_signals ++ to the upcoming new g1_start that occurs with a concurrent ++ attempt to reuse the group's slot. + We use acquire MO for the __g_signals check to make the + __g1_start check work (see spinning above). 
+ Note that the group reference acquisition will not mask the +@@ -483,15 +483,24 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + an atomic read-modify-write operation and thus extend the release + sequence. */ + atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2); +- if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0) +- || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))) ++ signals = atomic_load_acquire (cond->__data.__g_signals + g); ++ g1_start = __condvar_load_g1_start_relaxed (cond); ++ lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; ++ ++ if (seq < (g1_start >> 1)) + { +- /* Our group is closed. Wake up any signalers that might be +- waiting. */ ++ /* group is closed already, so don't block */ + __condvar_dec_grefs (cond, g, private); + goto done; + } + ++ if ((int)(signals - lowseq) >= 2) ++ { ++ /* a signal showed up or G1/G2 switched after we grabbed the refcount */ ++ __condvar_dec_grefs (cond, g, private); ++ break; ++ } ++ + // Now block. + struct _pthread_cleanup_buffer buffer; + struct _condvar_cleanup_buffer cbuffer; +@@ -502,7 +511,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer); + + err = __futex_abstimed_wait_cancelable64 ( +- cond->__data.__g_signals + g, 0, clockid, abstime, private); ++ cond->__data.__g_signals + g, signals, clockid, abstime, private); + + __pthread_cleanup_pop (&buffer, 0); + +@@ -525,6 +534,8 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + signals = atomic_load_acquire (cond->__data.__g_signals + g); + } + ++ if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)) ++ goto done; + } + /* Try to grab a signal. Use acquire MO so that we see an up-to-date value + of __g1_start below (see spinning above for a similar case). In +@@ -533,69 +544,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g, + &signals, signals - 2)); + +- /* We consumed a signal but we could have consumed from a more recent group +- that aliased with ours due to being in the same group slot. If this +- might be the case our group must be closed as visible through +- __g1_start. */ +- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); +- if (seq < (g1_start >> 1)) +- { +- /* We potentially stole a signal from a more recent group but we do not +- know which group we really consumed from. +- We do not care about groups older than current G1 because they are +- closed; we could have stolen from these, but then we just add a +- spurious wake-up for the current groups. +- We will never steal a signal from current G2 that was really intended +- for G2 because G2 never receives signals (until it becomes G1). We +- could have stolen a signal from G2 that was conservatively added by a +- previous waiter that also thought it stole a signal -- but given that +- that signal was added unnecessarily, it's not a problem if we steal +- it. +- Thus, the remaining case is that we could have stolen from the current +- G1, where "current" means the __g1_start value we observed. However, +- if the current G1 does not have the same slot index as we do, we did +- not steal from it and do not need to undo that. This is the reason +- for putting a bit with G2's index into__g1_start as well. 
*/ +- if (((g1_start & 1) ^ 1) == g) +- { +- /* We have to conservatively undo our potential mistake of stealing +- a signal. We can stop trying to do that when the current G1 +- changes because other spinning waiters will notice this too and +- __condvar_quiesce_and_switch_g1 has checked that there are no +- futex waiters anymore before switching G1. +- Relaxed MO is fine for the __g1_start load because we need to +- merely be able to observe this fact and not have to observe +- something else as well. +- ??? Would it help to spin for a little while to see whether the +- current G1 gets closed? This might be worthwhile if the group is +- small or close to being closed. */ +- unsigned int s = atomic_load_relaxed (cond->__data.__g_signals + g); +- while (__condvar_load_g1_start_relaxed (cond) == g1_start) +- { +- /* Try to add a signal. We don't need to acquire the lock +- because at worst we can cause a spurious wake-up. If the +- group is in the process of being closed (LSB is true), this +- has an effect similar to us adding a signal. */ +- if (((s & 1) != 0) +- || atomic_compare_exchange_weak_relaxed +- (cond->__data.__g_signals + g, &s, s + 2)) +- { +- /* If we added a signal, we also need to add a wake-up on +- the futex. We also need to do that if we skipped adding +- a signal because the group is being closed because +- while __condvar_quiesce_and_switch_g1 could have closed +- the group, it might stil be waiting for futex waiters to +- leave (and one of those waiters might be the one we stole +- the signal from, which cause it to block using the +- futex). */ +- futex_wake (cond->__data.__g_signals + g, 1, private); +- break; +- } +- /* TODO Back off. */ +- } +- } +- } +- + done: + + /* Confirm that we have been woken. We do that before acquiring the mutex diff --git a/SOURCES/glibc-RHEL-78939-10.patch b/SOURCES/glibc-RHEL-78939-10.patch new file mode 100644 index 0000000000000000000000000000000000000000..e0481ced4a380f9d058479414420ce607384053f --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-10.patch @@ -0,0 +1,39 @@ +Partial revert of commit c36fc50781995e6758cae2b6927839d0157f213c +to restore the layout of pthread_cond_t and avoid a downstream +rpminspect and abidiff (libabigail tooling) spurious warning +about internal ABI changes. Without this change all RHEL developers +using pthread_cond_t would have to audit and waive the warning. +The alternative is to update the supression lists used in abidiff, +propagate that to the rpminspect service, and wait for that to +complete before doing the update. The more conservative position +is the partial revert of the layout change. + +This is a downstream-only change and is not required upstream. + +diff --git a/sysdeps/nptl/bits/thread-shared-types.h b/sysdeps/nptl/bits/thread-shared-types.h +index 5cd33b765d9689eb..5644472323fe5424 100644 +--- a/sysdeps/nptl/bits/thread-shared-types.h ++++ b/sysdeps/nptl/bits/thread-shared-types.h +@@ -109,7 +109,8 @@ struct __pthread_cond_s + unsigned int __high; + } __g1_start32; + }; +- unsigned int __g_size[2] __LOCK_ALIGNMENT; ++ unsigned int __glibc_unused___g_refs[2] __LOCK_ALIGNMENT; ++ unsigned int __g_size[2]; + unsigned int __g1_orig_size; + unsigned int __wrefs; + unsigned int __g_signals[2]; +diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h +index 7ea6001784783371..43146e91c9d9579b 100644 +--- a/sysdeps/nptl/pthread.h ++++ b/sysdeps/nptl/pthread.h +@@ -152,7 +152,7 @@ enum + + + /* Conditional variable handling. 
*/ +-#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0} } } ++#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, {0, 0}, 0, 0, {0, 0} } } + + + /* Cleanup buffers */ diff --git a/SOURCES/glibc-RHEL-78939-2.patch b/SOURCES/glibc-RHEL-78939-2.patch new file mode 100644 index 0000000000000000000000000000000000000000..99e7873a3feeaeb7148708d424b5f398164f1a95 --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-2.patch @@ -0,0 +1,133 @@ +commit 0cc973160c23bb67f895bc887dd6942d29f8fee3 +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 07:55:22 2024 -0500 + + nptl: Update comments and indentation for new condvar implementation + + Some comments were wrong after the most recent commit. This fixes that. + + Also fixing indentation where it was using spaces instead of tabs. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index b1565b780d175d3a..b355e38fb57862b1 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -361,8 +361,9 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + * New waiters arriving concurrently with the group switching will all go + into G2 until we atomically make the switch. Waiters existing in G2 + are not affected. +- * Waiters in G1 will be closed out immediately by the advancing of +- __g_signals to the next "lowseq" (low 31 bits of the new g1_start), ++ * Waiters in G1 have already received a signal and been woken. If they ++ haven't woken yet, they will be closed out immediately by the advancing ++ of __g_signals to the next "lowseq" (low 31 bits of the new g1_start), + which will prevent waiters from blocking using a futex on + __g_signals since it provides enough signals for all possible + remaining waiters. As a result, they can each consume a signal +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index c34280c6bc9e80fb..7dabcb15d2d818e7 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -250,7 +250,7 @@ __condvar_cleanup_waiting (void *arg) + figure out whether they are in a group that has already been completely + signaled (i.e., if the current G1 starts at a later position that the + waiter's position). Waiters cannot determine whether they are currently +- in G2 or G1 -- but they do not have too because all they are interested in ++ in G2 or G1 -- but they do not have to because all they are interested in + is whether there are available signals, and they always start in G2 (whose + group slot they know because of the bit in the waiter sequence. Signalers + will simply fill the right group until it is completely signaled and can +@@ -413,7 +413,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + } + + /* Now wait until a signal is available in our group or it is closed. +- Acquire MO so that if we observe a value of zero written after group ++ Acquire MO so that if we observe (signals == lowseq) after group + switching in __condvar_quiesce_and_switch_g1, we synchronize with that + store and will see the prior update of __g1_start done while switching + groups too. */ +@@ -423,8 +423,8 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + { + while (1) + { +- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); +- unsigned int lowseq = (g1_start & 1) == g ? 
signals : g1_start & ~1U; ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); ++ unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + + /* Spin-wait first. + Note that spinning first without checking whether a timeout +@@ -448,21 +448,21 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + + /* Reload signals. See above for MO. */ + signals = atomic_load_acquire (cond->__data.__g_signals + g); +- g1_start = __condvar_load_g1_start_relaxed (cond); +- lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; ++ g1_start = __condvar_load_g1_start_relaxed (cond); ++ lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + spin--; + } + +- if (seq < (g1_start >> 1)) ++ if (seq < (g1_start >> 1)) + { +- /* If the group is closed already, ++ /* If the group is closed already, + then this waiter originally had enough extra signals to + consume, up until the time its group was closed. */ + goto done; +- } ++ } + + /* If there is an available signal, don't block. +- If __g1_start has advanced at all, then we must be in G1 ++ If __g1_start has advanced at all, then we must be in G1 + by now, perhaps in the process of switching back to an older + G2, but in either case we're allowed to consume the available + signal and should not block anymore. */ +@@ -484,22 +484,23 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + sequence. */ + atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2); + signals = atomic_load_acquire (cond->__data.__g_signals + g); +- g1_start = __condvar_load_g1_start_relaxed (cond); +- lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; ++ g1_start = __condvar_load_g1_start_relaxed (cond); ++ lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + +- if (seq < (g1_start >> 1)) ++ if (seq < (g1_start >> 1)) + { +- /* group is closed already, so don't block */ ++ /* group is closed already, so don't block */ + __condvar_dec_grefs (cond, g, private); + goto done; + } + + if ((int)(signals - lowseq) >= 2) + { +- /* a signal showed up or G1/G2 switched after we grabbed the refcount */ ++ /* a signal showed up or G1/G2 switched after we grabbed the ++ refcount */ + __condvar_dec_grefs (cond, g, private); + break; +- } ++ } + + // Now block. + struct _pthread_cleanup_buffer buffer; +@@ -537,10 +538,8 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)) + goto done; + } +- /* Try to grab a signal. Use acquire MO so that we see an up-to-date value +- of __g1_start below (see spinning above for a similar case). In +- particular, if we steal from a more recent group, we will also see a +- more recent __g1_start below. */ ++ /* Try to grab a signal. See above for MO. (if we do another loop ++ iteration we need to see the correct value of g1_start) */ + while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g, + &signals, signals - 2)); + diff --git a/SOURCES/glibc-RHEL-78939-3.patch b/SOURCES/glibc-RHEL-78939-3.patch new file mode 100644 index 0000000000000000000000000000000000000000..00456d21de18322b2551ea28a0b9ed710888334c --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-3.patch @@ -0,0 +1,67 @@ +commit b42cc6af11062c260c7dfa91f1c89891366fed3e +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 07:55:50 2024 -0500 + + nptl: Remove unnecessary catch-all-wake in condvar group switch + + This wake is unnecessary. We only switch groups after every sleeper in a group + has been woken. 
Sure, they may take a while to actually wake up and may still + hold a reference, but waking them a second time doesn't speed that up. Instead + this just makes the code more complicated and may hide problems. + + In particular this safety wake wouldn't even have helped with the bug that was + fixed by Barrus' patch: The bug there was that pthread_cond_signal would not + switch g1 when it should, so we wouldn't even have entered this code path. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index b355e38fb57862b1..517ad52077829552 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -361,13 +361,7 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + * New waiters arriving concurrently with the group switching will all go + into G2 until we atomically make the switch. Waiters existing in G2 + are not affected. +- * Waiters in G1 have already received a signal and been woken. If they +- haven't woken yet, they will be closed out immediately by the advancing +- of __g_signals to the next "lowseq" (low 31 bits of the new g1_start), +- which will prevent waiters from blocking using a futex on +- __g_signals since it provides enough signals for all possible +- remaining waiters. As a result, they can each consume a signal +- and they will eventually remove their group reference. */ ++ * Waiters in G1 have already received a signal and been woken. */ + + /* Update __g1_start, which finishes closing this group. The value we add + will never be negative because old_orig_size can only be zero when we +@@ -380,29 +374,6 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + + unsigned int lowseq = ((old_g1_start + old_orig_size) << 1) & ~1U; + +- /* If any waiters still hold group references (and thus could be blocked), +- then wake them all up now and prevent any running ones from blocking. +- This is effectively a catch-all for any possible current or future +- bugs that can allow the group size to reach 0 before all G1 waiters +- have been awakened or at least given signals to consume, or any +- other case that can leave blocked (or about to block) older waiters.. */ +- if ((atomic_fetch_or_release (cond->__data.__g_refs + g1, 0) >> 1) > 0) +- { +- /* First advance signals to the end of the group (i.e. enough signals +- for the entire G1 group) to ensure that waiters which have not +- yet blocked in the futex will not block. +- Note that in the vast majority of cases, this should never +- actually be necessary, since __g_signals will have enough +- signals for the remaining g_refs waiters. As an optimization, +- we could check this first before proceeding, although that +- could still leave the potential for futex lost wakeup bugs +- if the signal count was non-zero but the futex wakeup +- was somehow lost. */ +- atomic_store_release (cond->__data.__g_signals + g1, lowseq); +- +- futex_wake (cond->__data.__g_signals + g1, INT_MAX, private); +- } +- + /* At this point, the old G1 is now a valid new G2 (but not in use yet). + No old waiter can neither grab a signal nor acquire a reference without + noticing that __g1_start is larger. 
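For context on the blocking path these patches keep reworking: the waiter hands the __g_signals value it last observed to the futex wait, and the kernel only puts it to sleep if the word still holds that value, so a waiter racing with a concurrent update re-checks instead of blocking on a stale value. A standalone sketch of that compare-and-block contract (Linux-specific, illustrative only, not taken from glibc; futex_wait here is a local wrapper, not the glibc-internal helper):

#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper around the futex syscall's FUTEX_WAIT_PRIVATE operation.  */
static long
futex_wait (uint32_t *addr, uint32_t expected)
{
  return syscall (SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected,
                  NULL, NULL, 0);
}

int
main (void)
{
  uint32_t observed = 1;   /* value the would-be waiter last read */
  uint32_t word = observed;

  /* A concurrent updater advances the word before the waiter blocks.  */
  word = 2;

  /* The kernel compares the word against the expected value and refuses
     to sleep because they no longer match.  */
  if (futex_wait (&word, observed) == -1 && errno == EAGAIN)
    puts ("word changed before blocking: EAGAIN, no sleep needed");
  return 0;
}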
diff --git a/SOURCES/glibc-RHEL-78939-4.patch b/SOURCES/glibc-RHEL-78939-4.patch new file mode 100644 index 0000000000000000000000000000000000000000..486903c9a65c9eca33e45c309d1225db0a81d985 --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-4.patch @@ -0,0 +1,107 @@ +commit 4f7b051f8ee3feff1b53b27a906f245afaa9cee1 +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 07:56:13 2024 -0500 + + nptl: Remove unnecessary quadruple check in pthread_cond_wait + + pthread_cond_wait was checking whether it was in a closed group no less than + four times. Checking once is enough. Here are the four checks: + + 1. While spin-waiting. This was dead code: maxspin is set to 0 and has been + for years. + 2. Before deciding to go to sleep, and before incrementing grefs: I kept this + 3. After incrementing grefs. There is no reason to think that the group would + close while we do an atomic increment. Obviously it could close at any + point, but that doesn't mean we have to recheck after every step. This + check was equally good as check 2, except it has to do more work. + 4. When we find ourselves in a group that has a signal. We only get here after + we check that we're not in a closed group. There is no need to check again. + The check would only have helped in cases where the compare_exchange in the + next line would also have failed. Relying on the compare_exchange is fine. + + Removing the duplicate checks clarifies the code. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 7dabcb15d2d818e7..ba9a19bedc2c176f 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -367,7 +367,6 @@ static __always_inline int + __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + clockid_t clockid, const struct __timespec64 *abstime) + { +- const int maxspin = 0; + int err; + int result = 0; + +@@ -426,33 +425,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); + unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + +- /* Spin-wait first. +- Note that spinning first without checking whether a timeout +- passed might lead to what looks like a spurious wake-up even +- though we should return ETIMEDOUT (e.g., if the caller provides +- an absolute timeout that is clearly in the past). However, +- (1) spurious wake-ups are allowed, (2) it seems unlikely that a +- user will (ab)use pthread_cond_wait as a check for whether a +- point in time is in the past, and (3) spinning first without +- having to compare against the current time seems to be the right +- choice from a performance perspective for most use cases. */ +- unsigned int spin = maxspin; +- while (spin > 0 && ((int)(signals - lowseq) < 2)) +- { +- /* Check that we are not spinning on a group that's already +- closed. */ +- if (seq < (g1_start >> 1)) +- break; +- +- /* TODO Back off. */ +- +- /* Reload signals. See above for MO. */ +- signals = atomic_load_acquire (cond->__data.__g_signals + g); +- g1_start = __condvar_load_g1_start_relaxed (cond); +- lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; +- spin--; +- } +- + if (seq < (g1_start >> 1)) + { + /* If the group is closed already, +@@ -483,24 +455,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + an atomic read-modify-write operation and thus extend the release + sequence. 
*/ + atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2); +- signals = atomic_load_acquire (cond->__data.__g_signals + g); +- g1_start = __condvar_load_g1_start_relaxed (cond); +- lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; +- +- if (seq < (g1_start >> 1)) +- { +- /* group is closed already, so don't block */ +- __condvar_dec_grefs (cond, g, private); +- goto done; +- } +- +- if ((int)(signals - lowseq) >= 2) +- { +- /* a signal showed up or G1/G2 switched after we grabbed the +- refcount */ +- __condvar_dec_grefs (cond, g, private); +- break; +- } + + // Now block. + struct _pthread_cleanup_buffer buffer; +@@ -534,9 +488,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + /* Reload signals. See above for MO. */ + signals = atomic_load_acquire (cond->__data.__g_signals + g); + } +- +- if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)) +- goto done; + } + /* Try to grab a signal. See above for MO. (if we do another loop + iteration we need to see the correct value of g1_start) */ diff --git a/SOURCES/glibc-RHEL-78939-5.patch b/SOURCES/glibc-RHEL-78939-5.patch new file mode 100644 index 0000000000000000000000000000000000000000..3df4fa83e4e4c3006773d27779d4447250067716 --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-5.patch @@ -0,0 +1,172 @@ +commit c36fc50781995e6758cae2b6927839d0157f213c +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 07:56:38 2024 -0500 + + nptl: Remove g_refs from condition variables + + This variable used to be needed to wait in group switching until all sleepers + have confirmed that they have woken. This is no longer needed. Nothing waits + on this variable so there is no need to track how many threads are currently + asleep in each group. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +# Conflicts: +# nptl/tst-cond22.c (No atomic wide counter refactor) +# sysdeps/nptl/bits/thread-shared-types.h (Likewise) + +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index ba9a19bedc2c176f..9652dbafe08dfde1 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -144,23 +144,6 @@ __condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g, + } + } + +-/* Wake up any signalers that might be waiting. */ +-static void +-__condvar_dec_grefs (pthread_cond_t *cond, unsigned int g, int private) +-{ +- /* Release MO to synchronize-with the acquire load in +- __condvar_quiesce_and_switch_g1. */ +- if (atomic_fetch_add_release (cond->__data.__g_refs + g, -2) == 3) +- { +- /* Clear the wake-up request flag before waking up. We do not need more +- than relaxed MO and it doesn't matter if we apply this for an aliased +- group because we wake all futex waiters right after clearing the +- flag. */ +- atomic_fetch_and_relaxed (cond->__data.__g_refs + g, ~(unsigned int) 1); +- futex_wake (cond->__data.__g_refs + g, INT_MAX, private); +- } +-} +- + /* Clean-up for cancellation of waiters waiting for normal signals. We cancel + our registration as a waiter, confirm we have woken up, and re-acquire the + mutex. */ +@@ -172,8 +155,6 @@ __condvar_cleanup_waiting (void *arg) + pthread_cond_t *cond = cbuffer->cond; + unsigned g = cbuffer->wseq & 1; + +- __condvar_dec_grefs (cond, g, cbuffer->private); +- + __condvar_cancel_waiting (cond, cbuffer->wseq >> 1, g, cbuffer->private); + /* FIXME With the current cancellation implementation, it is possible that + a thread is cancelled after it has returned from a syscall. 
This could +@@ -328,15 +309,6 @@ __condvar_cleanup_waiting (void *arg) + sufficient because if a waiter can see a sufficiently large value, it could + have also consume a signal in the waiters group. + +- It is essential that the last field in pthread_cond_t is __g_signals[1]: +- The previous condvar used a pointer-sized field in pthread_cond_t, so a +- PTHREAD_COND_INITIALIZER from that condvar implementation might only +- initialize 4 bytes to zero instead of the 8 bytes we need (i.e., 44 bytes +- in total instead of the 48 we need). __g_signals[1] is not accessed before +- the first group switch (G2 starts at index 0), which will set its value to +- zero after a harmless fetch-or whose return value is ignored. This +- effectively completes initialization. +- + + Limitations: + * This condvar isn't designed to allow for more than +@@ -441,21 +413,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + if ((int)(signals - lowseq) >= 2) + break; + +- /* No signals available after spinning, so prepare to block. +- We first acquire a group reference and use acquire MO for that so +- that we synchronize with the dummy read-modify-write in +- __condvar_quiesce_and_switch_g1 if we read from that. In turn, +- in this case this will make us see the advancement of __g_signals +- to the upcoming new g1_start that occurs with a concurrent +- attempt to reuse the group's slot. +- We use acquire MO for the __g_signals check to make the +- __g1_start check work (see spinning above). +- Note that the group reference acquisition will not mask the +- release MO when decrementing the reference count because we use +- an atomic read-modify-write operation and thus extend the release +- sequence. */ +- atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2); +- + // Now block. + struct _pthread_cleanup_buffer buffer; + struct _condvar_cleanup_buffer cbuffer; +@@ -472,18 +429,11 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + + if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW)) + { +- __condvar_dec_grefs (cond, g, private); +- /* If we timed out, we effectively cancel waiting. Note that +- we have decremented __g_refs before cancellation, so that a +- deadlock between waiting for quiescence of our group in +- __condvar_quiesce_and_switch_g1 and us trying to acquire +- the lock during cancellation is not possible. */ ++ /* If we timed out, we effectively cancel waiting. */ + __condvar_cancel_waiting (cond, seq, g, private); + result = err; + goto done; + } +- else +- __condvar_dec_grefs (cond, g, private); + + /* Reload signals. See above for MO. 
*/ + signals = atomic_load_acquire (cond->__data.__g_signals + g); +diff --git a/nptl/tst-cond22.c b/nptl/tst-cond22.c +index 64f19ea0a55af057..ebeeeaf666070076 100644 +--- a/nptl/tst-cond22.c ++++ b/nptl/tst-cond22.c +@@ -106,10 +106,10 @@ do_test (void) + status = 1; + } + +- printf ("cond = { %llu, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n", ++ printf ("cond = { %llu, %llu, %u/%u, %u/%u, %u, %u }\n", + c.__data.__wseq, c.__data.__g1_start, +- c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0], +- c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1], ++ c.__data.__g_signals[0], c.__data.__g_size[0], ++ c.__data.__g_signals[1], c.__data.__g_size[1], + c.__data.__g1_orig_size, c.__data.__wrefs); + + if (pthread_create (&th, NULL, tf, (void *) 1l) != 0) +@@ -149,10 +149,10 @@ do_test (void) + status = 1; + } + +- printf ("cond = { %llu, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n", ++ printf ("cond = { %llu, %llu, %u/%u, %u/%u, %u, %u }\n", + c.__data.__wseq, c.__data.__g1_start, +- c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0], +- c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1], ++ c.__data.__g_signals[0], c.__data.__g_size[0], ++ c.__data.__g_signals[1], c.__data.__g_size[1], + c.__data.__g1_orig_size, c.__data.__wrefs); + + return status; +diff --git a/sysdeps/nptl/bits/thread-shared-types.h b/sysdeps/nptl/bits/thread-shared-types.h +index 44bf1e358dbdaaff..5cd33b765d9689eb 100644 +--- a/sysdeps/nptl/bits/thread-shared-types.h ++++ b/sysdeps/nptl/bits/thread-shared-types.h +@@ -109,8 +109,7 @@ struct __pthread_cond_s + unsigned int __high; + } __g1_start32; + }; +- unsigned int __g_refs[2] __LOCK_ALIGNMENT; +- unsigned int __g_size[2]; ++ unsigned int __g_size[2] __LOCK_ALIGNMENT; + unsigned int __g1_orig_size; + unsigned int __wrefs; + unsigned int __g_signals[2]; +diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h +index 43146e91c9d9579b..7ea6001784783371 100644 +--- a/sysdeps/nptl/pthread.h ++++ b/sysdeps/nptl/pthread.h +@@ -152,7 +152,7 @@ enum + + + /* Conditional variable handling. */ +-#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, {0, 0}, 0, 0, {0, 0} } } ++#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0} } } + + + /* Cleanup buffers */ diff --git a/SOURCES/glibc-RHEL-78939-6.patch b/SOURCES/glibc-RHEL-78939-6.patch new file mode 100644 index 0000000000000000000000000000000000000000..7e966e5d2e569317dee6ccd1fd7860519a8bb94c --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-6.patch @@ -0,0 +1,91 @@ +commit 929a4764ac90382616b6a21f099192b2475da674 +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 08:03:44 2024 -0500 + + nptl: Use a single loop in pthread_cond_wait instaed of a nested loop + + The loop was a little more complicated than necessary. There was only one + break statement out of the inner loop, and the outer loop was nearly empty. + So just remove the outer loop, moving its code to the one break statement in + the inner loop. This allows us to replace all gotos with break statements. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 9652dbafe08dfde1..4886056d136db138 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -383,17 +383,15 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + return err; + } + +- /* Now wait until a signal is available in our group or it is closed. 
+- Acquire MO so that if we observe (signals == lowseq) after group +- switching in __condvar_quiesce_and_switch_g1, we synchronize with that +- store and will see the prior update of __g1_start done while switching +- groups too. */ +- unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); +- +- do +- { ++ + while (1) + { ++ /* Now wait until a signal is available in our group or it is closed. ++ Acquire MO so that if we observe (signals == lowseq) after group ++ switching in __condvar_quiesce_and_switch_g1, we synchronize with that ++ store and will see the prior update of __g1_start done while switching ++ groups too. */ ++ unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); + uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); + unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + +@@ -402,7 +400,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + /* If the group is closed already, + then this waiter originally had enough extra signals to + consume, up until the time its group was closed. */ +- goto done; ++ break; + } + + /* If there is an available signal, don't block. +@@ -411,7 +409,16 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + G2, but in either case we're allowed to consume the available + signal and should not block anymore. */ + if ((int)(signals - lowseq) >= 2) +- break; ++ { ++ /* Try to grab a signal. See above for MO. (if we do another loop ++ iteration we need to see the correct value of g1_start) */ ++ if (atomic_compare_exchange_weak_acquire ( ++ cond->__data.__g_signals + g, ++ &signals, signals - 2)) ++ break; ++ else ++ continue; ++ } + + // Now block. + struct _pthread_cleanup_buffer buffer; +@@ -432,19 +439,9 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + /* If we timed out, we effectively cancel waiting. */ + __condvar_cancel_waiting (cond, seq, g, private); + result = err; +- goto done; ++ break; + } +- +- /* Reload signals. See above for MO. */ +- signals = atomic_load_acquire (cond->__data.__g_signals + g); + } +- } +- /* Try to grab a signal. See above for MO. (if we do another loop +- iteration we need to see the correct value of g1_start) */ +- while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g, +- &signals, signals - 2)); +- +- done: + + /* Confirm that we have been woken. We do that before acquiring the mutex + to allow for execution of pthread_cond_destroy while having acquired the diff --git a/SOURCES/glibc-RHEL-78939-7.patch b/SOURCES/glibc-RHEL-78939-7.patch new file mode 100644 index 0000000000000000000000000000000000000000..87c54f2fe7fd841ffd1f020e0888d62a32fc86a2 --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-7.patch @@ -0,0 +1,138 @@ +commit ee6c14ed59d480720721aaacc5fb03213dc153da +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 08:04:10 2024 -0500 + + nptl: Fix indentation + + In my previous change I turned a nested loop into a simple loop. I'm doing + the resulting indentation changes in a separate commit to make the diff on + the previous commit easier to review. 
+ + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 4886056d136db138..6c130436b016977a 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -384,65 +384,65 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + } + + +- while (1) +- { +- /* Now wait until a signal is available in our group or it is closed. +- Acquire MO so that if we observe (signals == lowseq) after group +- switching in __condvar_quiesce_and_switch_g1, we synchronize with that +- store and will see the prior update of __g1_start done while switching +- groups too. */ +- unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); +- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); +- unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; +- +- if (seq < (g1_start >> 1)) +- { +- /* If the group is closed already, +- then this waiter originally had enough extra signals to +- consume, up until the time its group was closed. */ +- break; +- } +- +- /* If there is an available signal, don't block. +- If __g1_start has advanced at all, then we must be in G1 +- by now, perhaps in the process of switching back to an older +- G2, but in either case we're allowed to consume the available +- signal and should not block anymore. */ +- if ((int)(signals - lowseq) >= 2) +- { +- /* Try to grab a signal. See above for MO. (if we do another loop +- iteration we need to see the correct value of g1_start) */ +- if (atomic_compare_exchange_weak_acquire ( +- cond->__data.__g_signals + g, ++ while (1) ++ { ++ /* Now wait until a signal is available in our group or it is closed. ++ Acquire MO so that if we observe (signals == lowseq) after group ++ switching in __condvar_quiesce_and_switch_g1, we synchronize with that ++ store and will see the prior update of __g1_start done while switching ++ groups too. */ ++ unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); ++ unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; ++ ++ if (seq < (g1_start >> 1)) ++ { ++ /* If the group is closed already, ++ then this waiter originally had enough extra signals to ++ consume, up until the time its group was closed. */ ++ break; ++ } ++ ++ /* If there is an available signal, don't block. ++ If __g1_start has advanced at all, then we must be in G1 ++ by now, perhaps in the process of switching back to an older ++ G2, but in either case we're allowed to consume the available ++ signal and should not block anymore. */ ++ if ((int)(signals - lowseq) >= 2) ++ { ++ /* Try to grab a signal. See above for MO. (if we do another loop ++ iteration we need to see the correct value of g1_start) */ ++ if (atomic_compare_exchange_weak_acquire ( ++ cond->__data.__g_signals + g, + &signals, signals - 2)) +- break; +- else +- continue; +- } +- +- // Now block. 
+- struct _pthread_cleanup_buffer buffer; +- struct _condvar_cleanup_buffer cbuffer; +- cbuffer.wseq = wseq; +- cbuffer.cond = cond; +- cbuffer.mutex = mutex; +- cbuffer.private = private; +- __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer); +- +- err = __futex_abstimed_wait_cancelable64 ( +- cond->__data.__g_signals + g, signals, clockid, abstime, private); +- +- __pthread_cleanup_pop (&buffer, 0); +- +- if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW)) +- { +- /* If we timed out, we effectively cancel waiting. */ +- __condvar_cancel_waiting (cond, seq, g, private); +- result = err; + break; +- } ++ else ++ continue; + } + ++ // Now block. ++ struct _pthread_cleanup_buffer buffer; ++ struct _condvar_cleanup_buffer cbuffer; ++ cbuffer.wseq = wseq; ++ cbuffer.cond = cond; ++ cbuffer.mutex = mutex; ++ cbuffer.private = private; ++ __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer); ++ ++ err = __futex_abstimed_wait_cancelable64 ( ++ cond->__data.__g_signals + g, signals, clockid, abstime, private); ++ ++ __pthread_cleanup_pop (&buffer, 0); ++ ++ if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW)) ++ { ++ /* If we timed out, we effectively cancel waiting. */ ++ __condvar_cancel_waiting (cond, seq, g, private); ++ result = err; ++ break; ++ } ++ } ++ + /* Confirm that we have been woken. We do that before acquiring the mutex + to allow for execution of pthread_cond_destroy while having acquired the + mutex. */ diff --git a/SOURCES/glibc-RHEL-78939-8.patch b/SOURCES/glibc-RHEL-78939-8.patch new file mode 100644 index 0000000000000000000000000000000000000000..eff1b1a42a0456afec3c74632fa60a5e910c247b --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-8.patch @@ -0,0 +1,147 @@ +commit 4b79e27a5073c02f6bff9aa8f4791230a0ab1867 +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 08:04:54 2024 -0500 + + nptl: rename __condvar_quiesce_and_switch_g1 + + This function no longer waits for threads to leave g1, so rename it to + __condvar_switch_g1 + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_broadcast.c b/nptl/pthread_cond_broadcast.c +index f1275b2f15817788..3dff819952718892 100644 +--- a/nptl/pthread_cond_broadcast.c ++++ b/nptl/pthread_cond_broadcast.c +@@ -61,7 +61,7 @@ ___pthread_cond_broadcast (pthread_cond_t *cond) + cond->__data.__g_size[g1] << 1); + cond->__data.__g_size[g1] = 0; + +- /* We need to wake G1 waiters before we quiesce G1 below. */ ++ /* We need to wake G1 waiters before we switch G1 below. */ + /* TODO Only set it if there are indeed futex waiters. We could + also try to move this out of the critical section in cases when + G2 is empty (and we don't need to quiesce). */ +@@ -70,7 +70,7 @@ ___pthread_cond_broadcast (pthread_cond_t *cond) + + /* G1 is complete. Step (2) is next unless there are no waiters in G2, in + which case we can stop. */ +- if (__condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private)) ++ if (__condvar_switch_g1 (cond, wseq, &g1, private)) + { + /* Step (3): Send signals to all waiters in the old G2 / new G1. 
*/ + atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index 517ad52077829552..7b2b1e4605f163e7 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -329,16 +329,15 @@ __condvar_get_private (int flags) + return FUTEX_SHARED; + } + +-/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to +- leave G1, converts G1 into a fresh G2, and then switches group roles so that +- the former G2 becomes the new G1 ending at the current __wseq value when we +- eventually make the switch (WSEQ is just an observation of __wseq by the +- signaler). ++/* This closes G1 (whose index is in G1INDEX), converts G1 into a fresh G2, ++ and then switches group roles so that the former G2 becomes the new G1 ++ ending at the current __wseq value when we eventually make the switch ++ (WSEQ is just an observation of __wseq by the signaler). + If G2 is empty, it will not switch groups because then it would create an + empty G1 which would require switching groups again on the next signal. + Returns false iff groups were not switched because G2 was empty. */ + static bool __attribute__ ((unused)) +-__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, ++__condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + unsigned int *g1index, int private) + { + unsigned int g1 = *g1index; +@@ -354,8 +353,7 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + + cond->__data.__g_size[g1 ^ 1]) == 0) + return false; + +- /* Now try to close and quiesce G1. We have to consider the following kinds +- of waiters: ++ /* We have to consider the following kinds of waiters: + * Waiters from less recent groups than G1 are not affected because + nothing will change for them apart from __g1_start getting larger. + * New waiters arriving concurrently with the group switching will all go +@@ -363,12 +361,12 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + are not affected. + * Waiters in G1 have already received a signal and been woken. */ + +- /* Update __g1_start, which finishes closing this group. The value we add +- will never be negative because old_orig_size can only be zero when we +- switch groups the first time after a condvar was initialized, in which +- case G1 will be at index 1 and we will add a value of 1. +- Relaxed MO is fine because the change comes with no additional +- constraints that others would have to observe. */ ++ /* Update __g1_start, which closes this group. The value we add will never ++ be negative because old_orig_size can only be zero when we switch groups ++ the first time after a condvar was initialized, in which case G1 will be ++ at index 1 and we will add a value of 1. Relaxed MO is fine because the ++ change comes with no additional constraints that others would have to ++ observe. */ + __condvar_add_g1_start_relaxed (cond, + (old_orig_size << 1) + (g1 == 1 ? 1 : - 1)); + +diff --git a/nptl/pthread_cond_signal.c b/nptl/pthread_cond_signal.c +index 171193b13e203290..4f7639e386fc207a 100644 +--- a/nptl/pthread_cond_signal.c ++++ b/nptl/pthread_cond_signal.c +@@ -70,18 +70,17 @@ ___pthread_cond_signal (pthread_cond_t *cond) + bool do_futex_wake = false; + + /* If G1 is still receiving signals, we put the signal there. 
If not, we +- check if G2 has waiters, and if so, quiesce and switch G1 to the former +- G2; if this results in a new G1 with waiters (G2 might have cancellations +- already, see __condvar_quiesce_and_switch_g1), we put the signal in the +- new G1. */ ++ check if G2 has waiters, and if so, switch G1 to the former G2; if this ++ results in a new G1 with waiters (G2 might have cancellations already, ++ see __condvar_switch_g1), we put the signal in the new G1. */ + if ((cond->__data.__g_size[g1] != 0) +- || __condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private)) ++ || __condvar_switch_g1 (cond, wseq, &g1, private)) + { + /* Add a signal. Relaxed MO is fine because signaling does not need to +- establish a happens-before relation (see above). We do not mask the +- release-MO store when initializing a group in +- __condvar_quiesce_and_switch_g1 because we use an atomic +- read-modify-write and thus extend that store's release sequence. */ ++ establish a happens-before relation (see above). We do not mask the ++ release-MO store when initializing a group in __condvar_switch_g1 ++ because we use an atomic read-modify-write and thus extend that ++ store's release sequence. */ + atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 2); + cond->__data.__g_size[g1]--; + /* TODO Only set it if there are indeed futex waiters. */ +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 6c130436b016977a..173bd134164eed44 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -355,8 +355,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + because we do not need to establish any happens-before relation with + signalers (see __pthread_cond_signal); modification order alone + establishes a total order of waiters/signals. We do need acquire MO +- to synchronize with group reinitialization in +- __condvar_quiesce_and_switch_g1. */ ++ to synchronize with group reinitialization in __condvar_switch_g1. */ + uint64_t wseq = __condvar_fetch_add_wseq_acquire (cond, 2); + /* Find our group's index. We always go into what was G2 when we acquired + our position. */ +@@ -388,9 +387,9 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + { + /* Now wait until a signal is available in our group or it is closed. + Acquire MO so that if we observe (signals == lowseq) after group +- switching in __condvar_quiesce_and_switch_g1, we synchronize with that +- store and will see the prior update of __g1_start done while switching +- groups too. */ ++ switching in __condvar_switch_g1, we synchronize with that store and ++ will see the prior update of __g1_start done while switching groups ++ too. */ + unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); + uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); + unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; diff --git a/SOURCES/glibc-RHEL-78939-9.patch b/SOURCES/glibc-RHEL-78939-9.patch new file mode 100644 index 0000000000000000000000000000000000000000..4f94d4312fb4d6eac7431aac3a33910d226521cb --- /dev/null +++ b/SOURCES/glibc-RHEL-78939-9.patch @@ -0,0 +1,179 @@ +commit 91bb902f58264a2fd50fbce8f39a9a290dd23706 +Author: Malte Skarupke <malteskarupke@fastmail.fm> +Date: Wed Dec 4 08:05:40 2024 -0500 + + nptl: Use all of g1_start and g_signals + + The LSB of g_signals was unused. The LSB of g1_start was used to indicate + which group is G2. This was used to always go to sleep in pthread_cond_wait + if a waiter is in G2. 
A comment earlier in the file says that this is not + correct to do: + + "Waiters cannot determine whether they are currently in G2 or G1 -- but they + do not have to because all they are interested in is whether there are + available signals" + + I either would have had to update the comment, or get rid of the check. I + chose to get rid of the check. In fact I don't quite know why it was there. + There will never be available signals for group G2, so we didn't need the + special case. Even if there were, this would just be a spurious wake. This + might have caught some cases where the count has wrapped around, but it + wouldn't reliably do that, (and even if it did, why would you want to force a + sleep in that case?) and we don't support that many concurrent waiters + anyway. Getting rid of it allows us to use one more bit, making us more + robust to wraparound. + + Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm> + Reviewed-by: Carlos O'Donell <carlos@redhat.com> + +diff --git a/nptl/pthread_cond_broadcast.c b/nptl/pthread_cond_broadcast.c +index 3dff819952718892..6fd6cfe9d002c5d5 100644 +--- a/nptl/pthread_cond_broadcast.c ++++ b/nptl/pthread_cond_broadcast.c +@@ -58,7 +58,7 @@ ___pthread_cond_broadcast (pthread_cond_t *cond) + { + /* Add as many signals as the remaining size of the group. */ + atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, +- cond->__data.__g_size[g1] << 1); ++ cond->__data.__g_size[g1]); + cond->__data.__g_size[g1] = 0; + + /* We need to wake G1 waiters before we switch G1 below. */ +@@ -74,7 +74,7 @@ ___pthread_cond_broadcast (pthread_cond_t *cond) + { + /* Step (3): Send signals to all waiters in the old G2 / new G1. */ + atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, +- cond->__data.__g_size[g1] << 1); ++ cond->__data.__g_size[g1]); + cond->__data.__g_size[g1] = 0; + /* TODO Only set it if there are indeed futex waiters. */ + do_futex_wake = true; +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index 7b2b1e4605f163e7..485aca4076a372d7 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -348,9 +348,9 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + behavior. + Note that this works correctly for a zero-initialized condvar too. */ + unsigned int old_orig_size = __condvar_get_orig_size (cond); +- uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1; +- if (((unsigned) (wseq - old_g1_start - old_orig_size) +- + cond->__data.__g_size[g1 ^ 1]) == 0) ++ uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond); ++ uint64_t new_g1_start = old_g1_start + old_orig_size; ++ if (((unsigned) (wseq - new_g1_start) + cond->__data.__g_size[g1 ^ 1]) == 0) + return false; + + /* We have to consider the following kinds of waiters: +@@ -361,16 +361,10 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + are not affected. + * Waiters in G1 have already received a signal and been woken. */ + +- /* Update __g1_start, which closes this group. The value we add will never +- be negative because old_orig_size can only be zero when we switch groups +- the first time after a condvar was initialized, in which case G1 will be +- at index 1 and we will add a value of 1. Relaxed MO is fine because the +- change comes with no additional constraints that others would have to +- observe. */ +- __condvar_add_g1_start_relaxed (cond, +- (old_orig_size << 1) + (g1 == 1 ? 
1 : - 1)); +- +- unsigned int lowseq = ((old_g1_start + old_orig_size) << 1) & ~1U; ++ /* Update __g1_start, which closes this group. Relaxed MO is fine because ++ the change comes with no additional constraints that others would have ++ to observe. */ ++ __condvar_add_g1_start_relaxed (cond, old_orig_size); + + /* At this point, the old G1 is now a valid new G2 (but not in use yet). + No old waiter can neither grab a signal nor acquire a reference without +@@ -382,13 +376,13 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + g1 ^= 1; + *g1index ^= 1; + +- /* Now advance the new G1 g_signals to the new lowseq, giving it ++ /* Now advance the new G1 g_signals to the new g1_start, giving it + an effective signal count of 0 to start. */ +- atomic_store_release (cond->__data.__g_signals + g1, lowseq); ++ atomic_store_release (cond->__data.__g_signals + g1, (unsigned)new_g1_start); + + /* These values are just observed by signalers, and thus protected by the + lock. */ +- unsigned int orig_size = wseq - (old_g1_start + old_orig_size); ++ unsigned int orig_size = wseq - new_g1_start; + __condvar_set_orig_size (cond, orig_size); + /* Use and addition to not loose track of cancellations in what was + previously G2. */ +diff --git a/nptl/pthread_cond_signal.c b/nptl/pthread_cond_signal.c +index 4f7639e386fc207a..9a5bac92fe8fc246 100644 +--- a/nptl/pthread_cond_signal.c ++++ b/nptl/pthread_cond_signal.c +@@ -81,7 +81,7 @@ ___pthread_cond_signal (pthread_cond_t *cond) + release-MO store when initializing a group in __condvar_switch_g1 + because we use an atomic read-modify-write and thus extend that + store's release sequence. */ +- atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 2); ++ atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 1); + cond->__data.__g_size[g1]--; + /* TODO Only set it if there are indeed futex waiters. */ + do_futex_wake = true; +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 173bd134164eed44..944b241ec26b9b32 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -85,7 +85,7 @@ __condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g, + not hold a reference on the group. */ + __condvar_acquire_lock (cond, private); + +- uint64_t g1_start = __condvar_load_g1_start_relaxed (cond) >> 1; ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); + if (g1_start > seq) + { + /* Our group is closed, so someone provided enough signals for it. +@@ -260,7 +260,6 @@ __condvar_cleanup_waiting (void *arg) + * Waiters fetch-add while having acquire the mutex associated with the + condvar. Signalers load it and fetch-xor it concurrently. + __g1_start: Starting position of G1 (inclusive) +- * LSB is index of current G2. + * Modified by signalers while having acquired the condvar-internal lock + and observed concurrently by waiters. + __g1_orig_size: Initial size of G1 +@@ -281,11 +280,9 @@ __condvar_cleanup_waiting (void *arg) + * Reference count used by waiters concurrently with signalers that have + acquired the condvar-internal lock. + __g_signals: The number of signals that can still be consumed, relative to +- the current g1_start. (i.e. bits 31 to 1 of __g_signals are bits +- 31 to 1 of g1_start with the signal count added) ++ the current g1_start. (i.e. g1_start with the signal count added) + * Used as a futex word by waiters. Used concurrently by waiters and + signalers. +- * LSB is currently reserved and 0. 
+ __g_size: Waiters remaining in this group (i.e., which have not been + signaled yet. + * Accessed by signalers and waiters that cancel waiting (both do so only +@@ -392,9 +389,8 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + too. */ + unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); + uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); +- unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U; + +- if (seq < (g1_start >> 1)) ++ if (seq < g1_start) + { + /* If the group is closed already, + then this waiter originally had enough extra signals to +@@ -407,13 +403,13 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, + by now, perhaps in the process of switching back to an older + G2, but in either case we're allowed to consume the available + signal and should not block anymore. */ +- if ((int)(signals - lowseq) >= 2) ++ if ((int)(signals - (unsigned int)g1_start) > 0) + { + /* Try to grab a signal. See above for MO. (if we do another loop + iteration we need to see the correct value of g1_start) */ + if (atomic_compare_exchange_weak_acquire ( + cond->__data.__g_signals + g, +- &signals, signals - 2)) ++ &signals, signals - 1)) + break; + else + continue; diff --git a/SPECS/glibc.spec b/SPECS/glibc.spec index a4b9704ab23dec4ef0b1d5dfce12fd5a941b9591..76474d46e2ab6b179720b8ec3ffa774efbf449fd 100644 --- a/SPECS/glibc.spec +++ b/SPECS/glibc.spec @@ -157,7 +157,7 @@ end \ Summary: The GNU libc libraries Name: glibc Version: %{glibcversion} -Release: 125%{?dist}.1 +Release: 125%{?dist}.3 # In general, GPLv2+ is used by programs, LGPLv2+ is used for # libraries. @@ -871,6 +871,17 @@ Patch632: glibc-RHEL-46979-3.patch Patch633: glibc-RHEL-46979-4.patch Patch634: glibc-RHEL-49489-3.patch Patch635: glibc-RHEL-49489-4.patch +Patch636: glibc-RHEL-69003.patch +Patch637: glibc-RHEL-78939-1.patch +Patch638: glibc-RHEL-78939-2.patch +Patch639: glibc-RHEL-78939-3.patch +Patch640: glibc-RHEL-78939-4.patch +Patch641: glibc-RHEL-78939-5.patch +Patch642: glibc-RHEL-78939-6.patch +Patch643: glibc-RHEL-78939-7.patch +Patch644: glibc-RHEL-78939-8.patch +Patch645: glibc-RHEL-78939-9.patch +Patch646: glibc-RHEL-78939-10.patch ############################################################################## # Continued list of core "glibc" package information: @@ -3030,6 +3041,12 @@ update_gconv_modules_cache () %endif %changelog +* Thu Feb 13 2025 Carlos O'Donell <carlos@redhat.com> - 2.34-125.3 +- Fix missed wakeup in POSIX thread condition variables (RHEL-78939) + +* Fri Dec 6 2024 DJ Delorie <dj@redhat.com> - 2.34-125.2 +- add GB18030-2022 charmap and tests (RHEL-69003) + * Fri Sep 27 2024 Florian Weimer <fweimer@redhat.com> - 2.34-125.1 - Remove some unused ppc64le string functions (RHEL-49489)