Commit 6da9342b authored by Rocky Automation

import 389-ds-base-1.4.3.8-6.module+el8.3.0+8995+c08169ba

parent f180f2ac
From e78d3bd879b880d679b49f3fa5ebe8009d309063 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 2 Oct 2020 12:03:12 +0200
Subject: [PATCH 1/8] Issue 4297- On ADD replication URP issue internal
searches with filter containing unescaped chars (#4355)
Bug description:
In MMR a consumer receiving an ADD has to do some checking based on the basedn.
It checks if the entry was a tombstone, or if the conflicting parent entry was a tombstone.
To do this checking, URP does internal searches using the basedn.
A '*' (ASTERISK) is valid in an RDN and in a DN. But when a DN is used in the assertion value of a filter, the ASTERISK needs to be escaped, else the server interprets the filter type as a substring (see
https://tools.ietf.org/html/rfc4515#section-3).
The problem is that if an added entry contains an ASTERISK in its DN, it is not escaped in the internal search and triggers a substring search (likely unindexed).
Fix description:
Escape the DN before doing the internal searches in URP.
Fixes: #4297
Reviewed by: Mark Reynolds, William Brown, Simon Pichugin (thanks !)
Platforms tested: F31
---
.../suites/replication/acceptance_test.py | 63 +++++++++++++++++++
ldap/servers/plugins/replication/urp.c | 10 ++-
ldap/servers/slapd/filter.c | 21 +++++++
ldap/servers/slapd/slapi-plugin.h | 1 +
4 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 5009f4e7c..661dddb11 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -7,6 +7,7 @@
 # --- END COPYRIGHT BLOCK ---
 #
 import pytest
+import logging
 from lib389.replica import Replicas
 from lib389.tasks import *
 from lib389.utils import *
@@ -556,6 +557,68 @@ def test_csnpurge_large_valueset(topo_m2):
     for i in range(21,25):
         test_user.add('description', 'value {}'.format(str(i)))
 
+@pytest.mark.ds51244
+def test_urp_trigger_substring_search(topo_m2):
+    """Test that an ADD of an entry with a '*' in its DN triggers
+    an internal search with an escaped DN
+
+    :id: 9869bb39-419f-42c3-a44b-c93eb0b77667
+    :setup: MMR with 2 masters
+    :steps:
+        1. Enable internal operation logging for plugins
+        2. Create on M1 a test_user with a '*' in its DN
+        3. Check the test_user is replicated
+        4. Check in access logs that the internal search does not contain '*'
+    :expectedresults:
+        1. Should succeed
+        2. Should succeed
+        3. Should succeed
+        4. Should succeed
+    """
+    m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+
+    # Enable internal operation logging to capture the URP internal searches
+    log.info('Set nsslapd-plugin-logging to on')
+    for inst in (m1, m2):
+        inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+        inst.config.set('nsslapd-plugin-logging', 'on')
+        inst.restart()
+
+    # Add a user with a DN containing '*'
+    test_asterisk_uid = 'asterisk_*_in_value'
+    test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX)
+
+    test_user = UserAccount(m1, test_asterisk_dn)
+    if test_user.exists():
+        log.info('Deleting entry {}'.format(test_asterisk_dn))
+        test_user.delete()
+    test_user.create(properties={
+        'uid': test_asterisk_uid,
+        'cn': test_asterisk_uid,
+        'sn': test_asterisk_uid,
+        'userPassword': test_asterisk_uid,
+        'uidNumber' : '1000',
+        'gidNumber' : '2000',
+        'homeDirectory' : '/home/asterisk',
+    })
+
+    # Check that the ADD was replicated on M2
+    test_user_m2 = UserAccount(m2, test_asterisk_dn)
+    for i in range(1, 5):
+        if test_user_m2.exists():
+            break
+        else:
+            log.info('Entry not yet replicated on M2, wait a bit')
+            time.sleep(2)
+
+    # Check that the M2 access log does not contain "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))"
+    log.info('Check that on M2, URP has not triggered such an internal search')
+    pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*"
+    found = m2.ds_access_log.match(pattern)
+    log.info("found line: %s" % found)
+    assert not found
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 79a817c90..301e9fa00 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,9 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
     Slapi_Entry **entries = NULL;
     Slapi_PBlock *newpb;
     char *basedn = slapi_entry_get_ndn(entry);
+    char *escaped_basedn;
     const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
+    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
 
-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+    slapi_ch_free((void **)&escaped_basedn);
     newpb = slapi_pblock_new();
     slapi_search_internal_set_pb(newpb,
                                  slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1602,12 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
     Slapi_Entry **entries = NULL;
     Slapi_PBlock *newpb;
     const char *basedn = slapi_sdn_get_dn(parentdn);
+    char *escaped_basedn;
+    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
 
     char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
     CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
     CSN *tombstone_csn = NULL;
-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+    slapi_ch_free((void **)&escaped_basedn);
     newpb = slapi_pblock_new();
     char *parent_dn = slapi_dn_parent (basedn);
     slapi_search_internal_set_pb(newpb,
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index c818baec3..d671c87ff 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -130,6 +130,27 @@ filter_escape_filter_value(struct slapi_filter *f, const char *fmt, size_t len _
     return ptr;
 }
 
+/* Escape an equality filter value (assertionValue) of a given attribute.
+ * The caller must free the allocated escaped filter value.
+ */
+char *
+slapi_filter_escape_filter_value(char* filter_attr, char *filter_value)
+{
+    char *result;
+    struct slapi_filter *f;
+
+    if ((filter_attr == NULL) || (filter_value == NULL)) {
+        return NULL;
+    }
+    f = (struct slapi_filter *)slapi_ch_calloc(1, sizeof(struct slapi_filter));
+    f->f_choice = LDAP_FILTER_EQUALITY;
+    f->f_un.f_un_ava.ava_type = filter_attr;
+    f->f_un.f_un_ava.ava_value.bv_len = strlen(filter_value);
+    f->f_un.f_un_ava.ava_value.bv_val = filter_value;
+    result = filter_escape_filter_value(f, FILTER_EQ_FMT, FILTER_EQ_LEN);
+    slapi_ch_free((void**) &f);
+    return result;
+}
 
 /*
  * get_filter_internal(): extract an LDAP filter from a BerElement and create
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 8d9c3fa6a..04c02cf7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5262,6 +5262,7 @@ int slapi_vattr_filter_test_ext(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Filter *
 int slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2);
 Slapi_Filter *slapi_filter_dup(Slapi_Filter *f);
 int slapi_filter_changetype(Slapi_Filter *f, const char *newtype);
+char *slapi_filter_escape_filter_value(char* filter_attr, char *filter_value);
 int slapi_attr_is_last_mod(char *attr);
--
2.26.2
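A note on the escaping itself: RFC 4515 reserves '*', '(', ')', '\' and NUL in filter assertion values, so an unescaped '*' turns an equality component into a substring one. The server-side fix above introduces slapi_filter_escape_filter_value() for this; a minimal client-side sketch of the same idea, using python-ldap's stock escaper escape_filter_chars, shows the before/after:

    from ldap.filter import escape_filter_chars

    basedn = 'uid=asterisk_*_in_value,dc=example,dc=com'

    # Unescaped: the '*' makes the nscpentrydn component a substring filter,
    # which is likely unindexed on the consumer.
    unsafe = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % basedn

    # Escaped: '*' becomes '\2a', so the component stays an equality filter.
    safe = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % escape_filter_chars(basedn)
    print(safe)
    # (&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_\2a_in_value,dc=example,dc=com))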
From 3cf7734177c70c36062d4e667b91e15f22a2ea81 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH 2/8] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
The previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.
Fix description:
Use the escaped filter component directly when building the final filter.
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
---
ldap/servers/plugins/replication/urp.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 301e9fa00..96ad2759a 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
     Slapi_Entry **entries = NULL;
     Slapi_PBlock *newpb;
     char *basedn = slapi_entry_get_ndn(entry);
-    char *escaped_basedn;
+    char *escaped_filter;
     const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
-    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+    escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
 
-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
-    slapi_ch_free((void **)&escaped_basedn);
+    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+    slapi_ch_free((void **)&escaped_filter);
     newpb = slapi_pblock_new();
     slapi_search_internal_set_pb(newpb,
                                  slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,14 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
     Slapi_Entry **entries = NULL;
     Slapi_PBlock *newpb;
     const char *basedn = slapi_sdn_get_dn(parentdn);
-    char *escaped_basedn;
-    escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
-
+    char *escaped_filter;
+    escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
     char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
     CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
     CSN *tombstone_csn = NULL;
-    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
-    slapi_ch_free((void **)&escaped_basedn);
+    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+    slapi_ch_free((void **)&escaped_filter);
     newpb = slapi_pblock_new();
     char *parent_dn = slapi_dn_parent (basedn);
     slapi_search_internal_set_pb(newpb,
--
2.26.2
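The subtlety behind this second patch: slapi_filter_escape_filter_value() returns a complete filter component of the form "(attr=escaped-value)", not a bare escaped assertion value, so the first patch effectively built "(nscpentrydn=(nscpentrydn=...))". A hypothetical Python sketch of the distinction (escape_filter_component is illustrative only, not a real API):

    from ldap.filter import escape_filter_chars

    def escape_filter_component(attr, value):
        # Returns a full '(attr=escaped-value)' component, analogous to
        # slapi_filter_escape_filter_value(), not just the escaped value.
        return '(%s=%s)' % (attr, escape_filter_chars(value))

    basedn = 'uid=asterisk_*_in_value,dc=example,dc=com'
    component = escape_filter_component('nscpentrydn', basedn)

    # Wrong (first patch): substituting the component as if it were a value
    # doubles the attribute: (&(...)(nscpentrydn=(nscpentrydn=...)))
    broken = '(&(objectclass=nstombstone)(nscpentrydn=%s))' % component

    # Right (this patch): splice the whole component into the AND filter
    fixed = '(&(objectclass=nstombstone)%s)' % component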
From 16a004faf7eda3f8c4d59171bceab8cf78a9d002 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 6 Aug 2020 14:50:19 -0400
Subject: [PATCH 3/8] Issue 51233 - ds-replcheck crashes in offline mode
Bug Description: When processing all the DNs found in the Master LDIF
                 it is possible that the LDIF is not in the expected
                 order, and ldifsearch then fails (crashing the tool).
Fix Description: If ldifsearch does not find an entry, start from the
                 beginning of the LDIF and try again.
relates: https://pagure.io/389-ds-base/issue/51233
Reviewed by: spichugi(Thanks!)
---
ldap/admin/src/scripts/ds-replcheck | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 5bb7dfce3..1c133f4dd 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -725,6 +725,10 @@ def do_offline_report(opts, output_file=None):
     missing = False
     for dn in master_dns:
         mresult = ldif_search(MLDIF, dn)
+        if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
+            # Try again from the beginning
+            MLDIF.seek(0)
+            mresult = ldif_search(MLDIF, dn)
         rresult = ldif_search(RLDIF, dn)
         if dn in replica_dns:
--
2.26.2
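The fix works because ldifsearch scans the LDIF sequentially from the current file offset, so an entry whose DN sorts before the current position is never reached without a rewind. The retry pattern in isolation, as a sketch (find_entry and the result-dict keys mirror the ds-replcheck code above; this is not a public API):

    def find_entry(ldif_fh, dn, ldif_search):
        """Sequential LDIF scan with one retry from the top of the file."""
        result = ldif_search(ldif_fh, dn)
        if result['entry'] is None and result['conflict'] is None and not result['tombstone']:
            # The DN may live before the current offset; rewind and rescan once
            ldif_fh.seek(0)
            result = ldif_search(ldif_fh, dn)
        return result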
From bc8bdaa57ba9b57671e2921705b99eaa70729ce7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 11:45:11 -0500
Subject: [PATCH 4/8] Issue 4429 - NULL dereference in revert_cache()
Bug Description: During a delete, if the DN (with an escaped leading space)
                 of an existing entry fails to parse, the server will revert
                 the entry update. In this case it leads to a crash
                 because the ldbm instance struct is not set before the
                 cache revert is attempted.
Fix Description: Check that the ldbm instance struct is not NULL before
                 dereferencing it.
Relates: https://github.com/389ds/389-ds-base/issues/4429
Reviewed by: firstyear & spichugi(Thanks!!)
---
.../tests/suites/syntax/acceptance_test.py | 40 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/cache.c | 3 ++
2 files changed, 43 insertions(+)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index db8f63c7e..543718689 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -6,12 +6,14 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 
+import ldap
 import logging
 import pytest
 import os
 from lib389.schema import Schema
 from lib389.config import Config
 from lib389.idm.user import UserAccounts
+from lib389.idm.group import Groups
 from lib389._constants import DEFAULT_SUFFIX
 from lib389.topologies import log, topology_st as topo
@@ -105,6 +107,44 @@ def test_invalid_uidnumber(topo, validate_syntax_off):
     log.info('Found an invalid entry with wrong uidNumber - Success')
 
 
+def test_invalid_dn_syntax_crash(topo):
+    """Add an entry with an escaped space, restart the server, and try to delete
+    it. In this case the DN is not correctly parsed and causes the cache revert
+    to dereference a NULL pointer. So the delete can fail, as long as the server
+    does not crash.
+
+    :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8
+    :setup: Standalone Instance
+    :steps:
+        1. Add entry with leading escaped space in the RDN
+        2. Restart the server so the entry is rebuilt from the database
+        3. Delete the entry
+        4. The server should still be running
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    # Create group
+    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+    group = groups.create(properties={'cn': ' test'})
+
+    # Restart the server
+    topo.standalone.restart()
+
+    # Delete group
+    try:
+        group.delete()
+    except ldap.NO_SUCH_OBJECT:
+        # This is okay in this case as we are only concerned about a crash
+        pass
+
+    # Make sure server is still running
+    groups.list()
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c
index 89f958a35..5ad9ca829 100644
--- a/ldap/servers/slapd/back-ldbm/cache.c
+++ b/ldap/servers/slapd/back-ldbm/cache.c
@@ -614,6 +614,9 @@ flush_hash(struct cache *cache, struct timespec *start_time, int32_t type)
 void
 revert_cache(ldbm_instance *inst, struct timespec *start_time)
 {
+    if (inst == NULL) {
+        return;
+    }
     flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE);
     flush_hash(&inst->inst_dncache, start_time, DN_CACHE);
 }
--
2.26.2
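At the protocol level the reproducer is simply an entry whose RDN begins with an escaped space. A rough python-ldap sketch of the same scenario as the test above (host, credentials, objectclass choice, and the out-of-band restart are all assumptions):

    import ldap
    import ldap.modlist

    conn = ldap.initialize('ldap://localhost:389')           # assumed host/port
    conn.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

    dn = r'cn=\20test,dc=example,dc=com'    # leading space escaped as \20 in the RDN
    attrs = {'objectClass': [b'top', b'nsContainer'], 'cn': [b' test']}
    conn.add_s(dn, ldap.modlist.addModlist(attrs))

    # ... restart the server here so the entry is rebuilt from the database ...

    try:
        conn.delete_s(dn)                   # crashed the server before the fix
    except ldap.NO_SUCH_OBJECT:
        pass                                # acceptable; the concern is the crash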
From 132f126c18214345ef4204bf8a061a0eca58fa59 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 5/8] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
The keep alive entry is not created on the target master after online
initialization, and its RUV element stays empty until a direct update is
issued on that master.
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session, at a time we are sure the changelog exists and will not
be reset. It allows a consumer to have an RUV element with a CSN at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase.
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology, then reinitialized the
  replicas from an ldif without replication metadata rather than using the cli
- search for keepalive entries using search_s instead of getEntry
- add a comment about the keep alive entries' purpose
Last commit:
- wait until the RUVs are in sync before checking the keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
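For reference, a keep alive entry is an ldapsubentry sitting directly under the replicated suffix, one per master RID; the testcase below finds them with the filter "(&(objectclass=ldapsubentry)(cn=repl keep alive*))". A sketch of creating one by hand with python-ldap, just to show the entry's shape (the exact attribute set the server writes is an assumption):

    import ldap
    import ldap.modlist

    conn = ldap.initialize('ldap://localhost:389')           # assumed host/port
    conn.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

    rid = 1                                 # replica ID of the master it stands for
    dn = 'cn=repl keep alive %d,dc=example,dc=com' % rid
    attrs = {
        'objectClass': [b'top', b'ldapsubentry', b'extensibleObject'],
        'cn': [('repl keep alive %d' % rid).encode()],
    }
    conn.add_s(dn, ldap.modlist.addModlist(attrs))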
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
         for dn, entry in ldif_list:
             ldif_writer.unparse(dn, entry)
 
+def _remove_replication_data(ldif_file):
+    """ Remove the replication data from an ldif file:
+        db2ldif without -r includes some of the replica data like
+          - nsUniqueId
+          - keepalive entries
+        This function filters the ldif file to remove this data
+    """
+
+    with open(ldif_file) as f:
+        parser = ldif.LDIFRecordList(f)
+        parser.parse()
+
+    ldif_list = parser.all_records
+    # Iterate on a copy of the ldif entry list
+    for dn, entry in ldif_list[:]:
+        if dn.startswith('cn=repl keep alive'):
+            ldif_list.remove((dn, entry))
+        else:
+            entry.pop('nsUniqueId')
+    with open(ldif_file, 'w') as f:
+        ldif_writer = ldif.LDIFWriter(f)
+        for dn, entry in ldif_list:
+            ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance, replica):
+    # Returns the keep alive entries that exist under the suffix of the server instance
+    try:
+        entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+                                    "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+                                    ['cn', 'nsUniqueId', 'modifierTimestamp'])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to retrieve keepalive entries under %s on instance %s: error %s' % (replica.get_suffix(), instance, str(e)))
+        assert False
+    # No error, so let's log the keepalive entries
+    if log.isEnabledFor(logging.DEBUG):
+        for ret in entries:
+            log.debug("Found keepalive entry:\n" + str(ret))
+    return entries
+
+def verify_keepalive_entries(topo, expected):
+    # Check that keep alive entries exist (or do not exist) for every master on every master.
+    # Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+    #       That is ok for simple test cases like test_online_init_should_create_keepalive_entries, but
+    #       not for the general case, as keep alive entries associated with no longer existing masters
+    #       may exist (for example after: db2ldif / demote a master / ldif2db / init other masters).
+    #       ==> if the function is somehow pushed into lib389, a check better than simply counting
+    #       the entries should be done.
+    for masterId in topo.ms:
+        master = topo.ms[masterId]
+        for replica in Replicas(master).list():
+            if (replica.get_role() != ReplicaRole.MASTER):
+                continue
+            replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+            log.debug(f'Checking keepAliveEntries on {replica_info}')
+            keepaliveEntries = get_keepalive_entries(master, replica)
+            expectedCount = len(topo.ms) if expected else 0
+            foundCount = len(keepaliveEntries)
+            if (foundCount == expectedCount):
+                log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+            else:
+                log.error(f'{foundCount} keepalive entries were found '
+                          f'while {expectedCount} were expected on {replica_info}.')
+                assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+    """Check that keep alive entries are created when initializing a master from another one
+
+    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+    :setup: Two masters replication setup
+    :steps:
+        1. Generate ldif without replication data
+        2. Init both masters from that ldif
+        3. Check that keep alive entries do not exist
+        4. Perform online init of master2 from master1
+        5. Check that keep alive entries exist
+    :expectedresults:
+        1. No error while generating ldif
+        2. No error while importing the ldif file
+        3. No keepalive entries should exist on any master
+        4. No error while initializing master2
+        5. All keepalive entries should exist on every master
+
+    """
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+    # Step 1: Generate ldif without replication data
+    m1.stop()
+    m2.stop()
+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+               excludeSuffixes=None, repl_data=False,
+               outputfile=ldif_file, encrypt=False)
+    # Remove replication metadata that is still in the ldif
+    _remove_replication_data(ldif_file)
+
+    # Step 2: Init both masters from that ldif
+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m1.start()
+    m2.start()
+
+    """ Replica state is now as if CLI setup has been done using:
+        dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+        dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+    """
+
+    # Step 3: No keepalive entries should exist on any master
+    verify_keepalive_entries(topo_m2, False)
+
+    # Step 4: Perform online init of master2 from master1
+    agmt = Agreements(m1).list()[0]
+    agmt.begin_reinit()
+    (done, error) = agmt.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 5: All keepalive entries should exist on every master
+    # Verify the keep alive entries once replication is in sync
+    # (that is the step that fails when the bug is not fixed)
+    repl.wait_for_ruv(m2, m1)
+    verify_keepalive_entries(topo_m2, True)
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
     slapi_ch_free((void **)arg);
 }
 
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that gets replicated over    *
+ * the topology.                                                             *
+ * Their main purpose is to generate records in the changelog, and they are  *
+ * updated from time to time by fractional replication to ensure that at     *
+ * least one change is replicated by FR after a large number of              *
+ * unreplicated changes are found in the changelog. The benefit is that the  *
+ * fractional RUV then gets updated, so fewer changes need to be walked in   *
+ * the changelog when searching for the first change to send