Include keyspace range checks on flow tests
filipecosta90 committed Feb 13, 2023
1 parent 82dba6a commit 31cb241
Showing 2 changed files with 210 additions and 49 deletions.
99 changes: 59 additions & 40 deletions tests/include.py
@@ -7,21 +7,24 @@
TLS_CACERT = os.environ.get("TLS_CACERT", "")


def assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count,
def assert_keyspace_range(env, key_max, key_min, master_nodes_connections):
expected_keyspace_range = key_max - key_min + 1
overall_keyspace_range = agg_keyspace_range(master_nodes_connections)
# assert we have the expected keyspace range
env.assertEqual(expected_keyspace_range, overall_keyspace_range)
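With key_min = 1 and key_max = 500000 (the values the flow tests below use), both endpoints are counted, so the assertion expects exactly 500000 distinct keys across all master shards. A minimal worked example of the arithmetic:

# inclusive range: both key_min and key_max are generated by memtier
key_min, key_max = 1, 500000
expected_keyspace_range = key_max - key_min + 1
assert expected_keyspace_range == 500000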

def assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count):
failed_asserts = env.getNumberOfFailedAssertion()
try:
# assert correct exit code
env.assertTrue(memtier_ok == True)
# assert we have all outputs
env.assertTrue(os.path.isfile('{0}/mb.stdout'.format(config.results_dir)))
env.assertTrue(os.path.isfile('{0}/mb.stderr'.format(config.results_dir)))
env.assertTrue(os.path.isfile('{0}/mb.json'.format(config.results_dir)))
# assert we have the expected request count
env.assertEqual(overall_expected_request_count, overall_request_count)
finally:
if env.getNumberOfFailedAssertion() > failed_asserts:
debugPrintMemtierOnError(config, env)
# assert correct exit code
    env.assertTrue(memtier_ok)
# assert we have all outputs
env.assertTrue(os.path.isfile('{0}/mb.stdout'.format(config.results_dir)))
env.assertTrue(os.path.isfile('{0}/mb.stderr'.format(config.results_dir)))
env.assertTrue(os.path.isfile('{0}/mb.json'.format(config.results_dir)))

# assert we have the expected request count
env.assertEqual(overall_expected_request_count, overall_request_count)


def add_required_env_arguments(benchmark_specs, config, env, master_nodes_list):
# check if environment is cluster
@@ -36,39 +39,55 @@ def add_required_env_arguments(benchmark_specs, config, env, master_nodes_list):
config['redis_process_port'] = master_nodes_list[0]['port']


def debugPrintMemtierOnError(config, env):
with open('{0}/mb.stderr'.format(config.results_dir)) as stderr:
env.debugPrint("### PRINTING STDERR OUTPUT OF MEMTIER ON FAILURE ###", True)
env.debugPrint("### mb.stderr file location: {0}".format('{0}/mb.stderr'.format(config.results_dir)), True)
for line in stderr:
env.debugPrint(line.rstrip(), True)
with open('{0}/mb.stdout'.format(config.results_dir)) as stderr:
env.debugPrint("### PRINTING STDOUT OUTPUT OF MEMTIER ON FAILURE ###", True)
env.debugPrint("### mb.stdout file location: {0}".format('{0}/mb.stdout'.format(config.results_dir)), True)
for line in stderr:
env.debugPrint(line.rstrip(), True)

if not env.isCluster():
if env.envRunner is not None:
log_file = os.path.join(env.envRunner.dbDirPath, env.envRunner._getFileName('master', '.log'))
with open(log_file) as redislog:
env.debugPrint("### REDIS LOG ###", True)
env.debugPrint(
"### log_file file location: {0}".format(log_file), True)
for line in redislog:
env.debugPrint(line.rstrip(), True)


def get_expected_request_count(config):
def debugPrintMemtierOnError(config, env, memtier_ok):
if not memtier_ok:
with open('{0}/mb.stderr'.format(config.results_dir)) as stderr:
env.debugPrint("### PRINTING STDERR OUTPUT OF MEMTIER ON FAILURE ###", True)
env.debugPrint("### mb.stderr file location: {0}".format('{0}/mb.stderr'.format(config.results_dir)), True)
for line in stderr:
env.debugPrint(line.rstrip(), True)

        with open('{0}/mb.stdout'.format(config.results_dir)) as stdout:
            env.debugPrint("### PRINTING STDOUT OUTPUT OF MEMTIER ON FAILURE ###", True)
            env.debugPrint("### mb.stdout file location: {0}".format('{0}/mb.stdout'.format(config.results_dir)), True)
            for line in stdout:
                env.debugPrint(line.rstrip(), True)

if not env.isCluster():
if env.envRunner is not None:
log_file = os.path.join(env.envRunner.dbDirPath, env.envRunner._getFileName('master', '.log'))
with open(log_file) as redislog:
env.debugPrint("### REDIS LOG ###", True)
env.debugPrint(
"### log_file file location: {0}".format(log_file), True)
for line in redislog:
env.debugPrint(line.rstrip(), True)


def get_expected_request_count(config, key_minimum=0, key_maximum=1000000):
result = -1
if 'memtier_benchmark' in config:
mt = config['memtier_benchmark']
if 'threads' in mt and 'clients' in mt and 'requests' in mt:
result = config['memtier_benchmark']['threads'] * config['memtier_benchmark']['clients'] * \
config['memtier_benchmark']['requests']
if mt['requests'] != 'allkeys':
result = mt['threads'] * mt['clients'] * mt['requests']
else:
result = key_maximum - key_minimum + 1
return result
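Worked through with the defaults the flow tests use (threads=2, clients=10), the two branches give:

# numeric 'requests': every client on every thread issues that many requests
assert 2 * 10 * 200000 == 4000000  # threads * clients * requests
# requests='allkeys': one request per key in the inclusive range,
# regardless of how many threads/clients issue them
assert 500000 - 1 + 1 == 500000    # key_maximum - key_minimum + 1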



def agg_keyspace_range(master_nodes_connections):
overall_keyspace_range = 0
for master_connection in master_nodes_connections:
shard_reply = master_connection.execute_command("INFO", "KEYSPACE")
shard_count = 0
if 'db0' in shard_reply:
if 'keys' in shard_reply['db0']:
shard_count = int(shard_reply['db0']['keys'])
overall_keyspace_range = overall_keyspace_range + shard_count
return overall_keyspace_range
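For reference, redis-py style clients parse the INFO KEYSPACE reply into a dict of per-database sections. A sketch of the reply shape this helper consumes, with illustrative numbers (not taken from a real run):

# one master shard holding half of a 500000-key dataset (illustrative):
shard_reply = {'db0': {'keys': 250000, 'expires': 0, 'avg_ttl': 0}}
shard_count = int(shard_reply['db0']['keys'])
assert shard_count == 250000
# agg_keyspace_range sums shard_count across every master connection,
# giving the cluster-wide key count compared by assert_keyspace_range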

def agg_info_commandstats(master_nodes_connections, merged_command_stats):
overall_request_count = 0
for master_connection in master_nodes_connections:
160 changes: 151 additions & 9 deletions tests/tests_oss_simple_flow.py
@@ -5,6 +5,135 @@
from mbdirector.runner import RunConfig


def test_preload_and_set_get(env):
key_max = 500000
key_min = 1
    benchmark_specs = {"name": env.testName, "args": ['--pipeline=10', '--ratio=1:0', '--key-pattern=P:P', '--key-minimum={}'.format(key_min), '--key-maximum={}'.format(key_max)]}
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config(threads=2, clients=10, requests='allkeys')
master_nodes_list = env.getMasterNodesList()
    overall_expected_request_count = get_expected_request_count(config, key_min, key_max)

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

# Create a temporary directory
test_dir = tempfile.mkdtemp()

config = RunConfig(test_dir, env.testName, config, {})
ensure_clean_benchmark_folder(config.results_dir)

benchmark = Benchmark.from_json(config, benchmark_specs)

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
assert_keyspace_range(env, key_max, key_min, master_nodes_connections)

overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
json_filename = '{0}/mb.json'.format(config.results_dir)

for master_connection in master_nodes_connections:
master_connection.execute_command("CONFIG", "RESETSTAT")

    benchmark_specs = {"name": env.testName, "args": ['--pipeline=10', '--ratio=1:1', '--key-pattern=R:R', '--key-minimum={}'.format(key_min), '--key-maximum={}'.format(key_max)]}
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config(threads=2, clients=10, requests=200000)
master_nodes_list = env.getMasterNodesList()
    overall_expected_request_count = get_expected_request_count(config, key_min, key_max)

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

# Create a temporary directory
test_dir = tempfile.mkdtemp()

config = RunConfig(test_dir, env.testName, config, {})
ensure_clean_benchmark_folder(config.results_dir)

benchmark = Benchmark.from_json(config, benchmark_specs)

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
assert_keyspace_range(env, key_max, key_min, master_nodes_connections)

overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
json_filename = '{0}/mb.json'.format(config.results_dir)
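The test above runs in two phases: a deterministic SET-only preload (--ratio=1:0, --key-pattern=P:P, requests='allkeys') that writes every key in [key_min, key_max] exactly once, then mixed random traffic (--ratio=1:1, --key-pattern=R:R) confined to the same range. Random writes inside the range can only overwrite existing keys, never add new ones, so the keyspace cardinality must remain key_max - key_min + 1 after both phases. A condensed restatement of that invariant (a sketch assuming the helpers from tests/include.py are in scope; not part of the test file):

def keyspace_unchanged(master_nodes_connections, key_min, key_max):
    # holds after the preload and again after the random phase
    return agg_keyspace_range(master_nodes_connections) == key_max - key_min + 1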


def test_default_set(env):
key_max = 500000
key_min = 1
    benchmark_specs = {"name": env.testName, "args": ['--pipeline=10', '--ratio=1:0', '--key-pattern=P:P', '--key-minimum={}'.format(key_min), '--key-maximum={}'.format(key_max)]}
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config(threads=2, clients=10, requests='allkeys')
master_nodes_list = env.getMasterNodesList()
    overall_expected_request_count = get_expected_request_count(config, key_min, key_max)

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

# Create a temporary directory
test_dir = tempfile.mkdtemp()

config = RunConfig(test_dir, env.testName, config, {})
ensure_clean_benchmark_folder(config.results_dir)

benchmark = Benchmark.from_json(config, benchmark_specs)

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)
master_nodes_connections = env.getOSSMasterNodesConnectionList()

merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
assert_keyspace_range(env, key_max, key_min, master_nodes_connections)

overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
json_filename = '{0}/mb.json'.format(config.results_dir)

# ensure if we run again on a different key pattern the dataset doesn't grow
for master_connection in master_nodes_connections:
master_connection.execute_command("CONFIG", "RESETSTAT")

    benchmark_specs = {"name": env.testName, "args": ['--pipeline=10', '--ratio=1:0', '--key-pattern=R:R', '--key-minimum={}'.format(key_min), '--key-maximum={}'.format(key_max)]}
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config(threads=2, clients=10, requests=200000)
master_nodes_list = env.getMasterNodesList()
    overall_expected_request_count = get_expected_request_count(config, key_min, key_max)

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

# Create a temporary directory
test_dir = tempfile.mkdtemp()

config = RunConfig(test_dir, env.testName, config, {})
ensure_clean_benchmark_folder(config.results_dir)

benchmark = Benchmark.from_json(config, benchmark_specs)

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}}
assert_keyspace_range(env, key_max, key_min, master_nodes_connections)

overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
json_filename = '{0}/mb.json'.format(config.results_dir)

def test_default_set_get(env):
benchmark_specs = {"name": env.testName, "args": []}
addTLSArgs(benchmark_specs, env)
@@ -24,11 +153,13 @@ def test_default_set_get(env):

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)


def test_default_set_get_with_print_percentiles(env):
@@ -52,11 +183,13 @@ def test_default_set_get_with_print_percentiles(env):

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
json_filename = '{0}/mb.json'.format(config.results_dir)

hdr_files_sufix = ["_FULL_RUN_1","_SET_command_run_1","_GET_command_run_1"]
@@ -99,11 +232,13 @@ def test_default_set_get_1_1(env):

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)

# assert same number of gets and sets
env.assertEqual(merged_command_stats['cmdstat_set']['calls'], merged_command_stats['cmdstat_get']['calls'])
@@ -130,11 +265,13 @@ def test_default_set_get_3_runs(env):

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}, 'cmdstat_get': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)


def test_default_arbitrary_command_pubsub(env):
@@ -143,6 +280,7 @@ def test_default_arbitrary_command_pubsub(env):
addTLSArgs(benchmark_specs, env)
config = get_default_memtier_config()
master_nodes_list = env.getMasterNodesList()
overall_expected_request_count = 0

add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)

@@ -154,8 +292,9 @@

benchmark = Benchmark.from_json(config, benchmark_specs)

if not benchmark.run():
debugPrintMemtierOnError(config, env)
# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)


def test_default_arbitrary_command_set(env):
@@ -178,11 +317,13 @@

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_set': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)
assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)


def test_default_arbitrary_command_hset(env):
@@ -205,9 +346,10 @@

# benchmark.run() returns True if the return code of memtier_benchmark was 0
memtier_ok = benchmark.run()
debugPrintMemtierOnError(config, env, memtier_ok)

master_nodes_connections = env.getOSSMasterNodesConnectionList()
merged_command_stats = {'cmdstat_hset': {'calls': 0}}
overall_request_count = agg_info_commandstats(master_nodes_connections, merged_command_stats)
assert_minimum_memtier_outcomes(config, env, memtier_ok, overall_expected_request_count, overall_request_count)

assert_minimum_memtier_outcomes(config, env, memtier_ok, merged_command_stats, overall_expected_request_count,
overall_request_count)
