Skip to content

Commit

Permalink
Issue 5976 - Fix freeipa install regression with lmdb (#5977)
Browse files Browse the repository at this point in the history
* Issue 5976 - Fix freeipa install regression with lmdb

There are three issues blocking the ipa setup when using lmdb database

Missing cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config entry (for compatibility reasons, the entry should exist even if it is unused)
Missing task status after reindexing (known issue: cf. "nsTaskStatus is not created for index task with mdb backend" #5911)
Reindex task sets the exit code too early (leading to UNWILLING_TO_PERFORM / 'database is read-only' errors in subsequent write operations).
The fixes are:

Creates the cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config entry even if it is not used.
Ensure that both task status and exit code are set when importing/reindexing
Do not run the import framework in a new thread (instead, use the current thread) when doing a reindex in a task.
Issue: #5976

Reviewed by: @droideck, @tbordaz Thanks
  • Loading branch information
progier389 authored Nov 21, 2023
1 parent df7dd83 commit 770edb2
Show file tree
Hide file tree
Showing 5 changed files with 278 additions and 18 deletions.
16 changes: 15 additions & 1 deletion dirsrvtests/tests/suites/config/regression_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import pytest
from lib389.utils import *
from lib389.dseldif import DSEldif
from lib389.config import LDBMConfig
from lib389.config import BDB_LDBMConfig, LDBMConfig
from lib389.backend import Backends
from lib389.topologies import topology_st as topo

Expand Down Expand Up @@ -112,3 +112,17 @@ def test_maxbersize_repl(topo):
log.info("Assert no init_dse_file errors in the error log")
assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')


def test_bdb_config(topo):
    """Check that the bdb config entry exists.

    The cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config entry must
    exist even when the bdb backend implementation is not in use, for
    compatibility with applications (e.g. freeipa) that read or tweak it.

    :id: edbc6f54-7c98-11ee-b1c0-482ae39447e5
    :setup: Standalone instance
    :steps:
        1. Check that bdb config instance exists.
    :expectedresults:
        1. Success
    """

    inst = topo.standalone
    assert BDB_LDBMConfig(inst).exists()
161 changes: 156 additions & 5 deletions dirsrvtests/tests/suites/indexes/regression_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,18 +11,59 @@
import pytest
import ldap
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
from lib389.backend import Backend, Backends, DatabaseConfig
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
from lib389.index import Indexes
from lib389.backend import Backends, DatabaseConfig
from lib389.idm.user import UserAccounts
from lib389.dbgen import dbgen_users
from lib389.idm.domain import Domain
from lib389.idm.group import Groups, Group
from lib389.idm.nscontainer import nsContainer
from lib389.idm.user import UserAccount, UserAccounts
from lib389.index import Indexes
from lib389.plugins import MemberOfPlugin
from lib389.properties import TASK_WAIT
from lib389.tasks import Tasks, Task
from lib389.topologies import topology_st as topo
from lib389.utils import ds_is_older
from lib389.plugins import MemberOfPlugin
from lib389.idm.nscontainer import nsContainer

pytestmark = pytest.mark.tier1

SUFFIX2 = 'dc=example2,dc=com'
BENAME2 = 'be2'

DEBUGGING = os.getenv("DEBUGGING", default=False)


@pytest.fixture(scope="function")
def add_backend_and_ldif_50K_users(request, topo):
    """Create an empty second backend and generate a 50K-user ldif file.

    The ldif is imported once so the backend is populated, and the path of
    the generated ldif file is returned for further use by the test.
    """

    inst = topo.standalone
    tasks = Tasks(inst)
    ldif_path = f'{inst.ldifdir}/be2_50K_users.ldif'

    # New backend hosting the second suffix
    new_be = Backend(inst)
    new_be.create(properties={
        'cn': BENAME2,
        'nsslapd-suffix': SUFFIX2,
    })

    def teardown():
        nonlocal new_be
        if not DEBUGGING:
            new_be.delete()

    request.addfinalizer(teardown)

    # Generate and import the 50K users under ou=people
    people_parent = f'ou=people,{SUFFIX2}'
    dbgen_users(inst, 50000, ldif_path, SUFFIX2, generic=True, parent=people_parent)
    rc = tasks.importLDIF(
        suffix=SUFFIX2,
        input_file=ldif_path,
        args={TASK_WAIT: True}
    )
    assert rc == 0

    return ldif_path


@pytest.fixture(scope="function")
def add_a_group_with_users(request, topo):
Expand Down Expand Up @@ -260,6 +301,116 @@ def test_reject_virtual_attr_for_indexing(topo):
be.add_index('employeeType', ['eq'])
break

def test_task_status(topo):
    """Check that finished tasks have both a status and exit code
    :id: 56d03656-79a6-11ee-bfc3-482ae39447e5
    :setup: Standalone instance
    :steps:
        1. Start a Reindex task on 'cn' and wait until it is completed
        2. Check that task has a status
        3. Check that exit code is 0
        4. Start a Reindex task on 'badattr' and wait until it is completed
        5. Check that task has a status
        6. Check that exit code is 0
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    inst = topo.standalone
    tasks = Tasks(inst)
    # Completed reindex tasks MUST have a status because freeipa checks it.
    # Bad attributes ('badattr') are skipped without setting an error code,
    # so both runs are expected to finish with exit code 0.
    for attrname in ('cn', 'badattr'):
        tasks.reindex(
            suffix=DEFAULT_SUFFIX,
            attrname=attrname,
            args={TASK_WAIT: True}
        )
        finished_task = Task(inst, tasks.dn)
        assert finished_task.status()
        assert finished_task.get_exit_code() == 0


def test_task_and_be(topo, add_backend_and_ldif_50K_users):
    """Check that backend is writable after finishing a task
    :id: 047869da-7a4d-11ee-895c-482ae39447e5
    :setup: Standalone instance + a second backend with 50K users
    :steps:
        1. Start an Import task and wait until it is completed
        2. Modify the suffix entry description
        3. Start a Reindex task on all attributes and wait until it is completed
        4. Modify the suffix entry description
        5. Start an Export task and wait until it is completed
        6. Modify the suffix entry description
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    tasks = Tasks(topo.standalone)
    user = UserAccount(topo.standalone, f'uid=user00001,ou=people,{SUFFIX2}')
    ldif_file = add_backend_and_ldif_50K_users

    # Import
    # BUGFIX: the return code comparison was previously not asserted,
    # so a failing import went undetected.
    assert tasks.importLDIF(
        suffix=SUFFIX2,
        input_file=ldif_file,
        args={TASK_WAIT: True}
    ) == 0
    # Backend must be writable once the import task is done
    descval = 'test_task_and_be tc1'
    user.set('description', descval)
    assert user.get_attr_val_utf8_l('description') == descval

    # Reindex some attributes ('badattr' is skipped without error)
    assert tasks.reindex(
        suffix=SUFFIX2,
        attrname=['description', 'rdn', 'uid', 'cn', 'sn', 'badattr'],
        args={TASK_WAIT: True}
    ) == 0
    # Backend must be writable once the reindex task is done:
    # both a modify and an add must succeed.
    descval = 'test_task_and_be tc2'
    user.set('description', descval)
    assert user.get_attr_val_utf8_l('description') == descval
    users = UserAccounts(topo.standalone, SUFFIX2, rdn=None)
    user = users.create(properties={
        'uid': 'user1',
        'sn': 'user1',
        'cn': 'user1',
        'uidNumber': '1001',
        'gidNumber': '1001',
        'homeDirectory': '/home/user1'
    })

    # Export
    assert tasks.exportLDIF(
        suffix=SUFFIX2,
        output_file=f'{ldif_file}2',
        args={TASK_WAIT: True}
    ) == 0
    # Backend must be writable once the export task is done
    descval = 'test_task_and_be tc3'
    user.set('description', descval)
    assert user.get_attr_val_utf8_l('description') == descval


if __name__ == "__main__":
# Run isolated
Expand Down
56 changes: 54 additions & 2 deletions ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,39 @@ int dbmdb_ctx_t_modify_entry_callback(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_En

static dblayer_private dbmdb_fake_priv; /* A copy of the callback array used by dbmdb_be() */

/* Dummy bdb config entry (needed for compatibility with application that tries to tweak it). */
/*
 * PR_snprintf template used by dbmdb_ctx_t_add_dse_entries(): the two %s
 * placeholders receive the db home directory and the log directory
 * (the caller passes the same value for both).
 * NOTE(review): attribute values appear to mirror usual bdb defaults and
 * are presumably informational only when the mdb backend is active —
 * confirm before relying on them.
 */
const char *bdb_config_entry_template =
    "dn: cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config\n"
    "objectClass: extensibleobject\n"
    "objectClass: top\n"
    "cn: bdb\n"
    "description: The BerkeleyDB config entry (meaningful only \n"
    " if nsslapd-backend-implement is bdb)\n"
    "nsslapd-cache-autosize: 25\n"
    "nsslapd-cache-autosize-split: 25\n"
    "nsslapd-dbcachesize: 1610612736\n"
    "nsslapd-db-checkpoint-interval: 60\n"
    "nsslapd-db-compactdb-interval: 2592000\n"
    "nsslapd-db-compactdb-time: 23:59\n"
    "nsslapd-db-deadlock-policy: 9\n"
    "nsslapd-db-durable-transaction: on\n"
    "nsslapd-db-home-directory: %s\n"
    "nsslapd-db-locks: 10000\n"
    "nsslapd-db-locks-monitoring-enabled: on\n"
    "nsslapd-db-locks-monitoring-pause: 500\n"
    "nsslapd-db-locks-monitoring-threshold: 90\n"
    "nsslapd-db-logbuf-size: 0\n"
    "nsslapd-db-logdirectory: %s\n"
    "nsslapd-db-private-import-mem: on\n"
    "nsslapd-db-transaction-batch-max-wait: 50\n"
    "nsslapd-db-transaction-batch-min-wait: 50\n"
    "nsslapd-db-transaction-batch-val: 0\n"
    "nsslapd-db-transaction-wait: off\n"
    "nsslapd-import-cache-autosize: -1\n"
    "nsslapd-import-cachesize: 16777216\n"
    "nsslapd-search-bypass-filter-test: on\n"
    "nsslapd-serial-lock: on\n";

backend *dbmdb_be(void)
{
static backend be = {0};
Expand Down Expand Up @@ -207,7 +240,7 @@ dbmdb_ctx_t_add_dse_entries(struct ldbminfo *li, char **entries, char *string1,
Slapi_PBlock *util_pb = NULL;
int rc;
int result;
char entry_string[512];
char entry_string[4096];
int dont_write_file = 0;
char ebuf[BUFSIZ];

Expand All @@ -217,7 +250,7 @@ dbmdb_ctx_t_add_dse_entries(struct ldbminfo *li, char **entries, char *string1,

for (x = 0; strlen(entries[x]) > 0; x++) {
util_pb = slapi_pblock_new();
PR_snprintf(entry_string, 512, entries[x], string1, string2, string3);
PR_snprintf(entry_string, (sizeof entry_string), entries[x], string1, string2, string3);
e = slapi_str2entry(entry_string, 0);
PL_strncpyz(ebuf, slapi_entry_get_dn_const(e), sizeof(ebuf)-1); /* for logging */
slapi_add_entry_internal_set_pb(util_pb, e, NULL, li->li_identity, 0);
Expand Down Expand Up @@ -531,6 +564,22 @@ dbmdb_ctx_t_setup_default(struct ldbminfo *li)
}
}

/* Add a dummy config entry to ensure compatibility with
 * applications that tries to tweak bdb parameters
 */
static void
dbmdb_create_bdb_config_entry(struct ldbminfo *li)
{
    int home_found = 0;
    char *home_dir = dbmdb_get_home_dir(li, &home_found);
    char *dse_entries[] = { (char *)bdb_config_entry_template, "" };

    if (!home_found) {
        /* No db home directory available: substitute a placeholder */
        home_dir = "???";
    }
    /* Template has two %s placeholders; both take the db home directory */
    dbmdb_ctx_t_add_dse_entries(li, dse_entries, home_dir, home_dir, NULL, 0);
}

static int
dbmdb_ctx_t_upgrade_dse_info(struct ldbminfo *li)
{
Expand Down Expand Up @@ -655,6 +704,9 @@ dbmdb_ctx_t_load_dse_info(struct ldbminfo *li)
char *dn = NULL;
int rval = 0;

/* Lets create bdb config entry for compatibility with legacy applications */
dbmdb_create_bdb_config_entry(li);

/* We try to read the entry
* cn=mdb, cn=config, cn=ldbm database, cn=plugins, cn=config. If the entry is
* there, then we process the config information it stores.
Expand Down
59 changes: 52 additions & 7 deletions ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
Original file line number Diff line number Diff line change
Expand Up @@ -668,6 +668,43 @@ dbmdb_import_run_pass(ImportJob *job, int *status)

/* Helper function to make up filenames */

/* Ensure that the task status and exit code is properly set */
static void
dbmdb_task_finish(ImportJob *job, int ret)
{
    ldbm_instance *inst = job->inst;
    char *opname = "importing";
    char *task_dn = ""; /* stays "" (static, not freed) when there is no task */

    /* Select the operation name used in status and log messages */
    if (job->flags & (FLAG_DRYRUN | FLAG_UPGRADEDNFORMAT_V1)) {
        opname = "upgrading dn";
    } else if (job->flags & FLAG_REINDEXING) {
        opname = "indexing";
    }

    if (job->task != NULL) {
        /*
         * freeipa expect that finished tasks have both a status and
         * an exit code.
         * So lets ensure that both are set.
         */
        if (!job->task_status) {
            /* No status was started yet (cf issue #5911): create one now
             * so the "Finished" line below has somewhere to land. */
            dbmdb_import_log_status_start(job);
        }
        dbmdb_import_log_status_add_line(job, "%s: Finished %s task",
                inst->inst_name, opname);
        dbmdb_import_log_status_done(job);
        slapi_task_finish(job->task, ret);
        /* NOTE(review): job->task->task_dn is read after
         * slapi_task_finish(); assumes the task object remains valid
         * at this point — confirm against the task lifecycle. */
        task_dn = slapi_ch_smprintf(" task '%s'", job->task->task_dn);
    }
    slapi_log_err(SLAPI_LOG_INFO, "dbmdb_task_finish",
            "%s: Finished %s%s. Exit code is %d\n",
            inst->inst_name, opname, task_dn, ret);
    /* task_dn was heap-allocated only when non-empty (i.e. a task existed) */
    if (*task_dn) {
        slapi_ch_free_string(&task_dn);
    }
}

/* when the import is done, this function is called to bring stuff back up.
* returns 0 on success; anything else is an error
*/
Expand Down Expand Up @@ -700,6 +737,10 @@ dbmdb_import_all_done(ImportJob *job, int ret)

/* bring backend online again */
slapi_mtn_be_enable(inst->inst_be);
slapi_log_err(SLAPI_LOG_INFO, "dbmdb_import_all_done",
"Backend %s is now online.\n",
slapi_be_get_name(inst->inst_be));

}
ret |= rc;
}
Expand All @@ -708,7 +749,7 @@ dbmdb_import_all_done(ImportJob *job, int ret)
* (to avoid race condition in CI)
*/
if ((job->task != NULL) && (0 == slapi_task_get_refcount(job->task))) {
slapi_task_finish(job->task, ret & ~WARN_SKIPPED_IMPORT_ENTRY);
dbmdb_task_finish(job, ret & ~WARN_SKIPPED_IMPORT_ENTRY);
}

return ret;
Expand Down Expand Up @@ -982,15 +1023,11 @@ dbmdb_public_dbmdb_import_main(void *arg)
ret |= WARN_UPGRADE_DN_FORMAT_SPACE;
} else {
ret = -1;
if (job->task != NULL) {
slapi_task_finish(job->task, ret);
}
dbmdb_task_finish(job, ret);
}
} else if (0 != ret) {
import_log_notice(job, SLAPI_LOG_ERR, "dbmdb_public_dbmdb_import_main", "%s failed.", opstr);
if (job->task != NULL) {
slapi_task_finish(job->task, ret);
}
dbmdb_task_finish(job, ret);
} else {
if (job->task) {
/* set task warning if there are no errors */
Expand Down Expand Up @@ -1178,6 +1215,14 @@ dbmdb_run_ldif2db(Slapi_PBlock *pb)
slapi_task_set_cancel_fn(job->task, dbmdb_import_task_abort);
job->flags |= FLAG_ONLINE;

if (job->flags & FLAG_REINDEXING) {
/* Reindexing task : so we are called by task_index_thread
* that runs in a dedicated thread.
* ==> should process the "import" synchroneously.
*/
return dbmdb_public_dbmdb_import_main((void *)job);
}

/* create thread for dbmdb_import_main, so we can return */
thread = PR_CreateThread(PR_USER_THREAD, dbmdb_import_main, (void *)job,
PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
Expand Down
Loading

0 comments on commit 770edb2

Please sign in to comment.