diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index c3e26eed44..08544594fa 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,6 +17,7 @@
 from lib389.utils import *
 from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
 from lib389._mapped_object import DSLdapObjects
+from lib389.agreement import Agreements
 from lib389.cli_base import FakeArgs
 from lib389.cli_conf.backend import db_config_set
 from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
@@ -27,6 +28,8 @@
 from lib389.backend import Backends, DatabaseConfig
 from lib389.monitor import MonitorLDBM, Monitor
 from lib389.plugins import ReferentialIntegrityPlugin
+from lib389.replica import BootstrapReplicationManager, Replicas
+from lib389.passwd import password_generate
 
 pytestmark = pytest.mark.tier0
 
@@ -36,6 +39,8 @@
 logging.getLogger(__name__).setLevel(logging.INFO)
 log = logging.getLogger(__name__)
 
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
 @pytest.fixture(scope="module")
 def big_file():
     TEMP_BIG_FILE = ''
@@ -813,6 +818,87 @@ def test_numlisteners_limit(topo):
     assert numlisteners[0] == '4'
 
 
+
+def bootstrap_replication(inst_from, inst_to, creds):
+    manager = BootstrapReplicationManager(inst_to)
+    rdn_val = 'replication manager'
+    if manager.exists():
+        manager.delete()
+    manager.create(properties={
+        'cn': rdn_val,
+        'uid': rdn_val,
+        'userPassword': creds
+    })
+    for replica in Replicas(inst_to).list():
+        replica.remove_all('nsDS5ReplicaBindDNGroup')
+        replica.replace('nsDS5ReplicaBindDN', manager.dn)
+    for agmt in Agreements(inst_from).list():
+        agmt.replace('nsDS5ReplicaBindDN', manager.dn)
+        agmt.replace('nsDS5ReplicaCredentials', creds)
+
+
+@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="This test requires lmdb")
+def test_lmdb_autotuned_maxdbs(topology_m2, request):
+    """Verify that after restart, nsslapd-mdb-max-dbs is large enough to add a new backend.
+
+    :id: 0272d432-9080-11ef-8f40-482ae39447e5
+    :setup: Two suppliers configuration
+    :steps:
+        1. loop 20 times
+        2. In 1 loop: restart instance
+        3. In 1 loop: add a new backend
+        4. In 1 loop: check that instance is still alive
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    s1 = topology_m2.ms["supplier1"]
+    s2 = topology_m2.ms["supplier2"]
+
+    backends = Backends(s1)
+    db_config = DatabaseConfig(s1)
+    # Generate the teardown finalizer
+    belist = []
+    creds = password_generate()
+    bootstrap_replication(s2, s1, creds)
+    bootstrap_replication(s1, s2, creds)
+
+    def fin():
+        s1.start()
+        for be in belist:
+            be.delete()
+
+    if not DEBUGGING:
+        request.addfinalizer(fin)
+
+    # 1. Set autotuning (off-line to be able to decrease the value)
+    s1.stop()
+    dse_ldif = DSEldif(s1)
+    dse_ldif.replace(db_config.dn, 'nsslapd-mdb-max-dbs', '0')
+    os.remove(f'{s1.dbdir}/data.mdb')
+    s1.start()
+
+    # 2. Reinitialize the db:
+    log.info("Bulk import...")
+    agmt = Agreements(s2).list()[0]
+    agmt.begin_reinit()
+    (done, error) = agmt.wait_reinit()
+    log.info(f'Bulk import result is ({done}, {error})')
+    assert done is True
+    assert error is False
+
+    # 3. loop 20 times
+    for idx in range(20):
+        s1.restart()
+        log.info(f'Adding backend test{idx}')
+        belist.append(backends.create(properties={'cn': f'test{idx}',
+                                                  'nsslapd-suffix': f'dc=test{idx}'}))
+        assert s1.status()
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8fea63e359..35d0ece045 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -896,4 +896,6 @@ typedef struct _back_search_result_set
     ((L)->size == (R)->size && !memcmp((L)->data, (R)->data, (L)->size))
 
 typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
+
+pthread_mutex_t *get_import_ctx_mutex();
 #endif /* _back_ldbm_h_ */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index b3cd5ee866..2a142ec6fc 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -83,7 +83,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
     uint64_t total_space = 0;
     uint64_t avail_space = 0;
     uint64_t cur_dbsize = 0;
-    int nbchangelogs = 0;
+    int nbvlvs = 0;
     int nbsuffixes = 0;
     int nbindexes = 0;
     int nbagmt = 0;
@@ -100,7 +100,7 @@ dbmdb_compute_limits(struct ldbminfo *li)
      */
    if (dbmdb_count_config_entries("(objectClass=nsMappingTree)", &nbsuffixes) ||
        dbmdb_count_config_entries("(objectClass=nsIndex)", &nbsuffixes) ||
-       dbmdb_count_config_entries("(&(objectClass=nsds5Replica)(nsDS5Flags=1))", &nbchangelogs) ||
+       dbmdb_count_config_entries("(objectClass=vlvIndex)", &nbvlvs) ||
        dbmdb_count_config_entries("(objectClass=nsds5replicationagreement)", &nbagmt)) {
         /* error message is already logged */
         return 1;
@@ -120,8 +120,15 @@ dbmdb_compute_limits(struct ldbminfo *li)
     info->pagesize = sysconf(_SC_PAGE_SIZE);
     limits->min_readers = config_get_threadnumber() + nbagmt + DBMDB_READERS_MARGIN;
 
-    /* Default indexes are counted in "nbindexes" so we should always have enough resource to add 1 new suffix */
-    limits->min_dbs = nbsuffixes + nbindexes + nbchangelogs + DBMDB_DBS_MARGIN;
+    /*
+     * For each suffix there are 4 database instances:
+     * long-entryrdn, replication_changelog, id2entry and ancestorid,
+     * then the indexes plus, for each vlv index, the vlv index and its cache.
+     *
+     * Default indexes are counted in "nbindexes" so we should always have enough
+     * resource to add 1 new suffix
+     */
+    limits->min_dbs = 4*nbsuffixes + nbindexes + 2*nbvlvs + DBMDB_DBS_MARGIN;
 
     total_space = ((uint64_t)(buf.f_blocks)) * ((uint64_t)(buf.f_bsize));
     avail_space = ((uint64_t)(buf.f_bavail)) * ((uint64_t)(buf.f_bsize));
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 9504c24343..2d2a4f711e 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -4280,9 +4280,12 @@ dbmdb_import_init_writer(ImportJob *job, ImportRole_t role)
 void
 dbmdb_free_import_ctx(ImportJob *job)
 {
-    if (job->writer_ctx) {
-        ImportCtx_t *ctx = job->writer_ctx;
-        job->writer_ctx = NULL;
+    ImportCtx_t *ctx = NULL;
+    pthread_mutex_lock(get_import_ctx_mutex());
+    ctx = job->writer_ctx;
+    job->writer_ctx = NULL;
+    pthread_mutex_unlock(get_import_ctx_mutex());
+    if (ctx) {
         pthread_mutex_destroy(&ctx->workerq.mutex);
         pthread_cond_destroy(&ctx->workerq.cv);
         slapi_ch_free((void**)&ctx->workerq.slots);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
index 50db548d95..83787931b7 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
@@ -287,6 +287,13 @@ int add_dbi(dbi_open_ctx_t *octx, backend *be, const char *fname, int flags)
         slapi_ch_free((void**)&treekey.dbname);
         return octx->rc;
     }
+    if (treekey.dbi >= ctx->dsecfg.max_dbs) {
+        octx->rc = MDB_DBS_FULL;
+        slapi_log_err(SLAPI_LOG_ERR, "add_dbi", "Failed to open database instance %s slots: %d/%d. Error is %d: %s.\n",
+                      treekey.dbname, treekey.dbi, ctx->dsecfg.max_dbs, octx->rc, mdb_strerror(octx->rc));
+        slapi_ch_free((void**)&treekey.dbname);
+        return octx->rc;
+    }
     if (octx->ai && octx->ai->ai_key_cmp_fn) {
         octx->rc = dbmdb_update_dbi_cmp_fn(ctx, &treekey, octx->ai->ai_key_cmp_fn, octx->txn);
         if (octx->rc) {
@@ -694,6 +701,7 @@ int dbmdb_make_env(dbmdb_ctx_t *ctx, int readOnly, mdb_mode_t mode)
         rc = dbmdb_write_infofile(ctx);
     } else {
         /* No Config ==> read it from info file */
+        ctx->dsecfg = ctx->startcfg;
     }
     if (rc) {
         return rc;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index 86df986bdd..f3bf68a9f4 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -505,7 +505,7 @@ int dblayer_show_statistics(const char *dbimpl_name, const char *dbhome, FILE *f
     li->li_plugin = be->be_database;
     li->li_plugin->plg_name = (char*) "back-ldbm-dbimpl";
     li->li_plugin->plg_libpath = (char*) "libback-ldbm";
-    li->li_directory = (char*)dbhome;
+    li->li_directory = get_li_directory(dbhome);
 
     /* Initialize database plugin */
     rc = dbimpl_setup(li, dbimpl_name);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 2bb8cb581d..e050d4a815 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -27,6 +27,8 @@
 #define NEED_DN_NORM_SP -25
 #define NEED_DN_NORM_BT -26
 
+pthread_mutex_t import_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; /* Protect against import context destruction */
+
 
 /********** routines to manipulate the entry fifo **********/
 
@@ -143,6 +145,14 @@ ldbm_back_wire_import(Slapi_PBlock *pb)
 
 /* Threads management */
 
+/* Return the mutex that protects against import context destruction */
+pthread_mutex_t *
+get_import_ctx_mutex()
+{
+    return &import_ctx_mutex;
+}
+
+
 /* tell all the threads to abort */
 void
 import_abort_all(ImportJob *job, int wait_for_them)
@@ -151,7 +161,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
 
     /* tell all the worker threads to abort */
     job->flags |= FLAG_ABORT;
-
+    pthread_mutex_lock(&import_ctx_mutex);
     for (worker = job->worker_list; worker; worker = worker->next)
         worker->command = ABORT;
 
@@ -167,6 +177,7 @@ import_abort_all(ImportJob *job, int wait_for_them)
             }
         }
     }
+    pthread_mutex_unlock(&import_ctx_mutex);
 }