Commit d817a874 authored by Rocky Automation

import valgrind-3.18.1-7.el8

parent 1f5d47a6
SOURCES/valgrind-3.17.0.tar.bz2
SOURCES/valgrind-3.18.1.tar.bz2
7770912c7465f93a90c5a9d5c1b1b036ebec04fd SOURCES/valgrind-3.17.0.tar.bz2
0a694a8d0c2152978bf64b67ad0b3dd972bbeb54 SOURCES/valgrind-3.18.1.tar.bz2
commit 595341b150312d2407bd43304449bf39ec3e1fa8
Author: Julian Seward <jseward@acm.org>
Date: Sat Nov 13 19:59:07 2021 +0100
amd64 front end: add more spec rules:
S after SHRQ
Z after SHLQ
NZ after SHLQ
Z after SHLL
S after SHLL
The lack of at least one of these was observed to cause occasional false
positives in Memcheck.
Also add commented-out cases so as to complete the set of 12 rules
{Z,NZ,S,NS} after {SHRQ,SHLQ,SHLL}. Those cases are left commented out
because I have so far not found any use cases for them.
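As an aside (not part of the patch), the identities these spec rules encode can be sketched in plain C, assuming, as in the VEX condition-code thunk, that the shift result is what is held in cc_dep1:

#include <stdint.h>

/* Minimal sketch of the flag identities behind the new spec rules.  The
   64-bit (SHRQ/SHLQ) or 32-bit (SHLL) shift result is assumed to live in
   dep1, as it does in the VEX thunk. */
static inline uint64_t zf_after_shq  (uint64_t dep1) { return dep1 == 0; }            /* Z  after SHRQ/SHLQ */
static inline uint64_t nzf_after_shq (uint64_t dep1) { return dep1 != 0; }            /* NZ after SHRQ/SHLQ */
static inline uint64_t sf_after_shrq (uint64_t dep1) { return dep1 >> 63; }           /* S  after SHRQ      */
static inline uint64_t zf_after_shll (uint64_t dep1) { return (uint32_t)dep1 == 0; }  /* Z  after SHLL      */
static inline uint64_t sf_after_shll (uint64_t dep1) { return (dep1 >> 31) & 1; }     /* S  after SHLL      */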
diff --git a/VEX/priv/guest_amd64_helpers.c b/VEX/priv/guest_amd64_helpers.c
index 9d61e7a0f..ba71c1b62 100644
--- a/VEX/priv/guest_amd64_helpers.c
+++ b/VEX/priv/guest_amd64_helpers.c
@@ -1823,16 +1823,26 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
/*---------------- SHRQ ----------------*/
if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondZ)) {
- /* SHRQ, then Z --> test dep1 == 0 */
+ /* SHRQ, then Z --> test result[63:0] == 0 */
return unop(Iop_1Uto64,
binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
}
if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNZ)) {
- /* SHRQ, then NZ --> test dep1 != 0 */
+ /* SHRQ, then NZ --> test result[63:0] != 0 */
return unop(Iop_1Uto64,
binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
}
+ if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondS)) {
+ /* SHRQ, then S --> (ULong)result[63] (result is in dep1) */
+ return binop(Iop_Shr64, cc_dep1, mkU8(63));
+ }
+ // No known test case for this, hence disabled:
+ //if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNS)) {
+ // /* SHRQ, then NS --> (ULong) ~ result[63] */
+ // vassert(0);
+ //}
+
/*---------------- SHRL ----------------*/
if (isU64(cc_op, AMD64G_CC_OP_SHRL) && isU64(cond, AMD64CondZ)) {
@@ -1881,6 +1891,52 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
// mkU32(0)));
//}
+ /*---------------- SHLQ ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondZ)) {
+ /* SHLQ, then Z --> test dep1 == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+ }
+ if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNZ)) {
+ /* SHLQ, then NZ --> test dep1 != 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+ }
+
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondS)) {
+ // /* SHLQ, then S --> (ULong)result[63] */
+ // vassert(0);
+ //}
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLQ) && isU64(cond, AMD64CondNS)) {
+ // /* SHLQ, then NS --> (ULong) ~ result[63] */
+ // vassert(0);
+ //}
+
+ /*---------------- SHLL ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondZ)) {
+ /* SHLL, then Z --> test result[31:0] == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ32, unop(Iop_64to32, cc_dep1),
+ mkU32(0)));
+ }
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNZ)) {
+ // /* SHLL, then NZ --> test dep1 != 0 */
+ // vassert(0);
+ //}
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondS)) {
+ /* SHLL, then S --> (ULong)result[31] */
+ return binop(Iop_And64,
+ binop(Iop_Shr64, cc_dep1, mkU8(31)),
+ mkU64(1));
+ }
+ //if (isU64(cc_op, AMD64G_CC_OP_SHLL) && isU64(cond, AMD64CondNS)) {
+ // /* SHLL, then NS --> (ULong) ~ result[31] */
+ // vassert(0);
+ //}
+
/*---------------- COPY ----------------*/
/* This can happen, as a result of amd64 FP compares: "comisd ... ;
jbe" for example. */
commit 2be719921e700a9ac9b85f470ed87cb8adf8151b
Author: Julian Seward <jseward@acm.org>
Date: Sat Nov 13 09:27:01 2021 +0100
Bug 445415 - arm64 front end: alignment checks missing for atomic instructions.
For the arm64 front end, none of the atomic instructions have address
alignment checks included in their IR. They all should. Because this IR is
in most cases translated back into atomic instructions in the back end, the
missing checks mean that alignment traps (SIGBUS) are raised on the host side
and not on the guest side, which is (very) incorrect behaviour of the
simulation.
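In guest-level terms the added check amounts to the following sketch (illustrative only; it stands in for the IRStmt_Exit/Ijk_SigBUS machinery the patch actually emits):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative sketch of the guest-level effect of gen_SIGBUS_if_not_XX_aligned:
   report whether an atomic access at 'effective_addr' must fault with SIGBUS.
   'align' is a power of two (2, 4, 8 or 16). */
static bool atomic_access_faults(uint64_t effective_addr, uint64_t align)
{
    /* Misaligned => the guest must see SIGBUS at the current guest PC,
       rather than the host's own atomic instruction taking the trap. */
    return (effective_addr & (align - 1)) != 0;
}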
diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c
index ee018c6a9..16a7e075f 100644
--- a/VEX/priv/guest_arm64_toIR.c
+++ b/VEX/priv/guest_arm64_toIR.c
@@ -4833,6 +4833,34 @@ static IRTemp gen_zwidening_load ( UInt szB, IRTemp addr )
}
+/* Generate a SIGBUS followed by a restart of the current instruction if
+ `effective_addr` is not `align`-aligned. This is required behaviour for
+ atomic instructions. This assumes that guest_PC_curr_instr is set correctly!
+
+ This is hardwired to generate SIGBUS because so far the only supported arm64
+ target (arm64-linux) does that. Should we need to later extend it to generate
+ some other signal, use the same scheme as with gen_SIGNAL_if_not_XX_aligned
+ in guest_amd64_toIR.c. */
+static
+void gen_SIGBUS_if_not_XX_aligned ( IRTemp effective_addr, ULong align )
+{
+ if (align == 1) {
+ return;
+ }
+ vassert(align == 16 || align == 8 || align == 4 || align == 2);
+ stmt(
+ IRStmt_Exit(
+ binop(Iop_CmpNE64,
+ binop(Iop_And64,mkexpr(effective_addr),mkU64(align-1)),
+ mkU64(0)),
+ Ijk_SigBUS,
+ IRConst_U64(guest_PC_curr_instr),
+ OFFB_PC
+ )
+ );
+}
+
+
/* Generate a "standard 7" name, from bitQ and size. But also
allow ".1d" since that's occasionally useful. */
static
@@ -6670,7 +6698,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is szB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
if (isLD && ss == BITS5(1,1,1,1,1)) {
IRTemp res = newTemp(ty);
@@ -6803,7 +6831,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is 2*elemSzB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, fullSzB);
if (isLD && ss == BITS5(1,1,1,1,1)) {
if (abiinfo->guest__use_fallback_LLSC) {
@@ -7044,7 +7072,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
- /* FIXME generate check that ea is szB-aligned */
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
if (isLD) {
IRTemp res = newTemp(ty);
@@ -7159,6 +7187,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRTemp ea = newTemp(Ity_I64);
assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
// Insert barrier before loading for acquire and acquire-release variants:
// A and AL.
@@ -7266,6 +7295,10 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
IRType ty = integerIRTypeOfSize(szB);
Bool is64 = szB == 8;
+ IRTemp ea = newTemp(Ity_I64);
+ assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, szB);
+
IRExpr *exp = narrowFrom64(ty, getIReg64orZR(ss));
IRExpr *new = narrowFrom64(ty, getIReg64orZR(tt));
@@ -7275,7 +7308,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
// Store the result back if LHS remains unchanged in memory.
IRTemp old = newTemp(ty);
stmt( IRStmt_CAS(mkIRCAS(/*oldHi*/IRTemp_INVALID, old,
- Iend_LE, getIReg64orSP(nn),
+ Iend_LE, mkexpr(ea),
/*expdHi*/NULL, exp,
/*dataHi*/NULL, new)) );
@@ -7307,6 +7340,10 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
if ((ss & 0x1) || (tt & 0x1)) {
/* undefined; fall through */
} else {
+ IRTemp ea = newTemp(Ity_I64);
+ assign(ea, getIReg64orSP(nn));
+ gen_SIGBUS_if_not_XX_aligned(ea, is64 ? 16 : 8);
+
IRExpr *expLo = getIRegOrZR(is64, ss);
IRExpr *expHi = getIRegOrZR(is64, ss + 1);
IRExpr *newLo = getIRegOrZR(is64, tt);
@@ -7318,7 +7355,7 @@ Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
stmt(IRStmt_MBE(Imbe_Fence));
stmt( IRStmt_CAS(mkIRCAS(oldHi, oldLo,
- Iend_LE, getIReg64orSP(nn),
+ Iend_LE, mkexpr(ea),
expHi, expLo,
newHi, newLo)) );
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
index b65e27db4..39c6aaa46 100644
--- a/VEX/priv/host_arm64_defs.c
+++ b/VEX/priv/host_arm64_defs.c
@@ -4033,6 +4033,7 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
case Ijk_FlushDCache: trcval = VEX_TRC_JMP_FLUSHDCACHE; break;
case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break;
case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break;
+ case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break;
//case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break;
case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break;
/* We don't expect to see the following being assisted. */
diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c
index 094e7e74b..82cb2d78c 100644
--- a/VEX/priv/host_arm64_isel.c
+++ b/VEX/priv/host_arm64_isel.c
@@ -4483,6 +4483,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
case Ijk_InvalICache:
case Ijk_FlushDCache:
case Ijk_SigTRAP:
+ case Ijk_SigBUS:
case Ijk_Yield: {
HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
addInstr(env, ARM64Instr_XAssisted(r, amPC, cc,
@@ -4576,8 +4577,8 @@ static void iselNext ( ISelEnv* env,
case Ijk_InvalICache:
case Ijk_FlushDCache:
case Ijk_SigTRAP:
- case Ijk_Yield:
- {
+ case Ijk_SigBUS:
+ case Ijk_Yield: {
HReg r = iselIntExpr_R(env, next);
ARM64AMode* amPC = mk_baseblock_64bit_access_amode(offsIP);
addInstr(env, ARM64Instr_XAssisted(r, amPC, ARM64cc_AL, jk));
diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c
index 1253cf588..482047c7a 100644
--- a/VEX/priv/main_main.c
+++ b/VEX/priv/main_main.c
@@ -2163,11 +2163,6 @@ static void check_hwcaps ( VexArch arch, UInt hwcaps )
if (have_fp16 != have_vfp16)
invalid_hwcaps(arch, hwcaps,
"Mismatch detected between scalar and vector FP16 features.\n");
- Bool have_rdm = ((hwcaps & VEX_HWCAPS_ARM64_RDM) != 0);
- Bool have_atomics = ((hwcaps & VEX_HWCAPS_ARM64_ATOMICS) != 0);
- if (have_rdm != have_atomics)
- invalid_hwcaps(arch, hwcaps,
- "Mismatch detected between RDMA and atomics features.\n");
return;
}
commit 7dbe2fed72886874f2eaf57dc07929542ae55b58
Author: Julian Seward <jseward@acm.org>
Date: Fri Nov 12 10:40:48 2021 +0100
Bug 445354 - arm64 backend: incorrect code emitted for doubleword CAS.
The sequence of instructions emitted by the arm64 backend for doubleword
compare-and-swap is incorrect. This could lead to incorrect simulation of the
AArch8.1 atomic instructions (CASP, at least). It also causes failures in the
upcoming fix for v8.0 support for LD{,A}XP/ST{,L}XP in bug 444399, at least
when running with the fallback LL/SC implementation
(`--sim-hints=fallback-llsc`, or as autoselected at startup). In the worst
case it can cause segfaulting in the generated code, because it could jump
backwards unexpectedly far.
The problem is the sequence emitted for ARM64in_CASP:
* the jump offsets are incorrect, both for `bne out` (x 2) and `cbnz w1, loop`.
* using w1 to hold the success indication of the stxp instruction trashes the
previous value in x1. But the value in x1 is an output of ARM64in_CASP,
hence one of the two output registers is corrupted. That confuses any code
downstream that wants to inspect those values to find out whether or not the
transaction succeeded.
The fixes are to
* fix the branch offsets
* use a different register to hold the stxp success indication. w3 is a
convenient choice.
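For reference (an illustrative sketch, not part of the patch): AArch64 B.cond stores its target as a signed 19-bit word offset in bits [23:5], with the condition in bits [3:0], which is why correcting the branch distances changes the literal instruction words emitted below:

#include <stdint.h>

/* Encode "b.ne #byte_off" (cond NE = 0b0001); byte_off must be a multiple
   of 4 and fit in a signed 19-bit word count. */
static uint32_t enc_bne(int32_t byte_off)
{
    uint32_t imm19 = ((uint32_t)(byte_off / 4)) & 0x7FFFFu;
    return 0x54000000u | (imm19 << 5) | 0x1u;
}

/* enc_bne(28) == 0x540000E1 (the old, overshooting offset);
   enc_bne(20) == 0x540000A1 and enc_bne(12) == 0x54000061 (the corrected ones). */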
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
index 5dccc0495..5657bcab9 100644
--- a/VEX/priv/host_arm64_defs.c
+++ b/VEX/priv/host_arm64_defs.c
@@ -2271,6 +2271,7 @@ void getRegUsage_ARM64Instr ( HRegUsage* u, const ARM64Instr* i, Bool mode64 )
addHRegUse(u, HRmWrite, hregARM64_X1());
addHRegUse(u, HRmWrite, hregARM64_X9());
addHRegUse(u, HRmWrite, hregARM64_X8());
+ addHRegUse(u, HRmWrite, hregARM64_X3());
break;
case ARM64in_MFence:
return;
@@ -4254,16 +4255,16 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
-- always:
cmp x0, x8 // EB08001F
- bne out // 540000E1 (b.ne #28 <out>)
+ bne out // 540000A1
cmp x1, x9 // EB09003F
- bne out // 540000A1 (b.ne #20 <out>)
+ bne out // 54000061
-- one of:
- stxp w1, x6, x7, [x2] // C8211C46
- stxp w1, w6, w7, [x2] // 88211C46
+ stxp w3, x6, x7, [x2] // C8231C46
+ stxp w3, w6, w7, [x2] // 88231C46
-- always:
- cbnz w1, loop // 35FFFE81 (cbnz w1, #-48 <loop>)
+ cbnz w3, loop // 35FFFF03
out:
*/
switch (i->ARM64in.CASP.szB) {
@@ -4277,15 +4278,15 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
default: vassert(0);
}
*p++ = 0xEB08001F;
- *p++ = 0x540000E1;
- *p++ = 0xEB09003F;
*p++ = 0x540000A1;
+ *p++ = 0xEB09003F;
+ *p++ = 0x54000061;
switch (i->ARM64in.CASP.szB) {
- case 8: *p++ = 0xC8211C46; break;
- case 4: *p++ = 0x88211C46; break;
+ case 8: *p++ = 0xC8231C46; break;
+ case 4: *p++ = 0x88231C46; break;
default: vassert(0);
}
- *p++ = 0x35FFFE81;
+ *p++ = 0x35FFFF03;
goto done;
}
case ARM64in_MFence: {
diff --git a/VEX/priv/host_arm64_defs.h b/VEX/priv/host_arm64_defs.h
index f0737f2c6..01fb5708e 100644
--- a/VEX/priv/host_arm64_defs.h
+++ b/VEX/priv/host_arm64_defs.h
@@ -720,6 +720,7 @@ typedef
Int szB; /* 1, 2, 4 or 8 */
} StrEX;
/* x1 = CAS(x3(addr), x5(expected) -> x7(new)),
+ and trashes x8
where x1[8*szB-1 : 0] == x5[8*szB-1 : 0] indicates success,
x1[8*szB-1 : 0] != x5[8*szB-1 : 0] indicates failure.
Uses x8 as scratch (but that's not allocatable).
@@ -738,7 +739,7 @@ typedef
-- if branch taken, failure; x1[8*szB-1 : 0] holds old value
-- attempt to store
stxr w8, x7, [x3]
- -- if store successful, x1==0, so the eor is "x1 := x5"
+ -- if store successful, x8==0
-- if store failed, branch back and try again.
cbnz w8, loop
after:
@@ -746,6 +747,12 @@ typedef
struct {
Int szB; /* 1, 2, 4 or 8 */
} CAS;
+ /* Doubleword CAS, 2 x 32 bit or 2 x 64 bit
+ x0(oldLSW),x1(oldMSW)
+ = DCAS(x2(addr), x4(expectedLSW),x5(expectedMSW)
+ -> x6(newLSW),x7(newMSW))
+ and trashes x8, x9 and x3
+ */
struct {
Int szB; /* 4 or 8 */
} CASP;
commit 9abfed23c0d430aafb85de6397d171316c982792
Author: Paul Floyd <pjfloyd@wanadoo.fr>
Date: Fri Nov 19 08:34:53 2021 +0100
Bug 445504 - Using C++ condition_variable results in bogus "mutex is locked simultaneously by two threads" warning
Add intercepts for pthread_cond_clockwait to DRD and Helgrind.
Also add the testcase from the bugzilla report (written by Bart), with a configure check.
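For context, pthread_cond_clockwait is the glibc (2.30+) primitive that libstdc++'s std::condition_variable::wait_for ends up calling on recent toolchains; a minimal sketch of a direct caller follows (the helper name is made up for illustration):

#define _GNU_SOURCE
#include <pthread.h>
#include <time.h>

/* Illustrative caller of the function the new intercepts wrap; the mutex
   must already be held, as with pthread_cond_timedwait. */
static int wait_with_deadline(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    struct timespec deadline;
    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 3;   /* wait at most ~3 seconds on the monotonic clock */
    return pthread_cond_clockwait(cond, mutex, CLOCK_MONOTONIC, &deadline);
}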
diff --git a/configure.ac b/configure.ac
index e7381f205..cb836dbff 100755
--- a/configure.ac
+++ b/configure.ac
@@ -1989,6 +1989,27 @@ AC_LANG(C)
AM_CONDITIONAL(CXX_CAN_INCLUDE_THREAD_HEADER, test x$ac_cxx_can_include_thread_header = xyes)
+# Check whether compiler can process #include <condition_variable> without errors
+
+AC_MSG_CHECKING([that C++ compiler can include <condition_variable> header file])
+AC_LANG(C++)
+safe_CXXFLAGS=$CXXFLAGS
+CXXFLAGS=-std=c++0x
+
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([
+#include <condition_variable>
+])],
+[
+ac_cxx_can_include_condition_variable_header=yes
+AC_MSG_RESULT([yes])
+], [
+ac_cxx_can_include_condition_variable_header=no
+AC_MSG_RESULT([no])
+])
+CXXFLAGS=$safe_CXXFLAGS
+AC_LANG(C)
+
+AM_CONDITIONAL(CXX_CAN_INCLUDE_CONDITION_VARIABLE_HEADER, test x$ac_cxx_can_include_condition_variable_header = xyes)
# On aarch64 before glibc 2.20 we would get the kernel user_pt_regs instead
# of the user_regs_struct from sys/user.h. They are structurally the same
diff --git a/drd/drd_pthread_intercepts.c b/drd/drd_pthread_intercepts.c
index 8b4454364..95127b42c 100644
--- a/drd/drd_pthread_intercepts.c
+++ b/drd/drd_pthread_intercepts.c
@@ -1175,6 +1175,30 @@ PTH_FUNCS(int, condZureltimedwait, pthread_cond_timedwait_intercept,
(cond, mutex, timeout));
#endif /* VGO_solaris */
+
+static __always_inline
+int pthread_cond_clockwait_intercept(pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ clockid_t clockid,
+ const struct timespec* abstime)
+{
+ int ret;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_COND_WAIT,
+ cond, mutex, DRD_(mutex_type)(mutex), 0, 0);
+ CALL_FN_W_WWWW(ret, fn, cond, mutex, clockid, abstime);
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_COND_WAIT,
+ cond, mutex, 1, 0, 0);
+ return ret;
+}
+
+PTH_FUNCS(int, pthreadZucondZuclockwait, pthread_cond_clockwait_intercept,
+ (pthread_cond_t *cond, pthread_mutex_t *mutex,
+ clockid_t clockid, const struct timespec* abstime),
+ (cond, mutex, clockid, abstime));
+
+
// NOTE: be careful to intercept only pthread_cond_signal() and not Darwin's
// pthread_cond_signal_thread_np(). The former accepts one argument; the latter
// two. Intercepting all pthread_cond_signal* functions will cause only one
diff --git a/drd/tests/Makefile.am b/drd/tests/Makefile.am
index 4cb2f7f84..c804391e8 100755
--- a/drd/tests/Makefile.am
+++ b/drd/tests/Makefile.am
@@ -105,6 +105,8 @@ EXTRA_DIST = \
circular_buffer.vgtest \
concurrent_close.stderr.exp \
concurrent_close.vgtest \
+ condvar.stderr.exp \
+ condvar.vgtest \
custom_alloc.stderr.exp \
custom_alloc.vgtest \
custom_alloc_fiw.stderr.exp \
@@ -458,6 +460,11 @@ check_PROGRAMS += \
endif
endif
+if CXX_CAN_INCLUDE_CONDITION_VARIABLE_HEADER
+check_PROGRAMS += \
+ condvar
+endif
+
if HAVE_OPENMP
check_PROGRAMS += omp_matinv omp_prime omp_printf
endif
@@ -502,6 +509,8 @@ LDADD = -lpthread
bug322621_SOURCES = bug322621.cpp
+condvar_SOURCES = condvar.cpp
+condvar_CXXFLAGS = $(AM_CXXFLAGS) -std=c++0x
concurrent_close_SOURCES = concurrent_close.cpp
if !VGCONF_OS_IS_FREEBSD
dlopen_main_LDADD = -ldl
diff --git a/drd/tests/condvar.cpp b/drd/tests/condvar.cpp
new file mode 100644
index 000000000..18ecb3f8a
--- /dev/null
+++ b/drd/tests/condvar.cpp
@@ -0,0 +1,55 @@
+/* See also https://bugs.kde.org/show_bug.cgi?id=445504 */
+
+#include <condition_variable>
+#include <future>
+#include <iostream>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+using lock_guard = std::lock_guard<std::mutex>;
+using unique_lock = std::unique_lock<std::mutex>;
+
+struct state {
+ std::mutex m;
+ std::vector<int> v;
+ std::condition_variable cv;
+
+ state() {
+ // Call pthread_cond_init() explicitly to let DRD know about 'cv'.
+ pthread_cond_init(cv.native_handle(), NULL);
+ }
+};
+
+void other_thread(state *sp) {
+ state &s = *sp;
+ std::cerr << "Other thread: waiting for notify\n";
+ unique_lock l{s.m};
+ while (true) {
+ if (s.cv.wait_for(l, std::chrono::seconds(3)) !=
+ std::cv_status::timeout) {
+ std::cerr << "Other thread: notified\n";
+ break;
+ }
+ }
+ return;
+}
+
+
+int main() {
+ state s;
+ auto future = std::async(std::launch::async, other_thread, &s);
+
+ if (future.wait_for(std::chrono::seconds(1)) != std::future_status::timeout) {
+ std::cerr << "Main: other thread returned too early!\n";
+ return 2;
+ }
+
+ {
+ std::lock_guard<std::mutex> g{s.m};
+ s.v.push_back(1);
+ s.v.push_back(2);
+ s.cv.notify_all();
+ }
+ return 0;
+}
diff --git a/drd/tests/condvar.stderr.exp b/drd/tests/condvar.stderr.exp
new file mode 100644
index 000000000..be1de9f97
--- /dev/null
+++ b/drd/tests/condvar.stderr.exp
@@ -0,0 +1,5 @@
+
+Other thread: waiting for notify
+Other thread: notified
+
+ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
diff --git a/drd/tests/condvar.vgtest b/drd/tests/condvar.vgtest
new file mode 100644
index 000000000..2e7d49f5a
--- /dev/null
+++ b/drd/tests/condvar.vgtest
@@ -0,0 +1,3 @@
+prereq: ./supported_libpthread && [ -e condvar ]
+vgopts: --check-stack-var=yes --read-var-info=yes
+prog: condvar
diff --git a/helgrind/hg_intercepts.c b/helgrind/hg_intercepts.c
index 866efdbaa..49c3ddcd9 100644
--- a/helgrind/hg_intercepts.c
+++ b/helgrind/hg_intercepts.c
@@ -1409,6 +1409,88 @@ static int pthread_cond_timedwait_WRK(pthread_cond_t* cond,
# error "Unsupported OS"
#endif
+//-----------------------------------------------------------
+// glibc: pthread_cond_clockwait
+//
+__attribute__((noinline))
+static int pthread_cond_clockwait_WRK(pthread_cond_t* cond,
+ pthread_mutex_t* mutex,
+ clockid_t clockid,