Fix clearing cache of InternalConfigScope (#22609)

Co-authored-by: Massimiliano Culpo <massimiliano.culpo@gmail.com>
This commit is contained in:
Harmen Stoppels 2021-03-30 13:23:39 +02:00 committed by GitHub
parent e3bcb0ec1e
commit 2a4c06b1e6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 47 additions and 38 deletions

View file

@@ -362,6 +362,10 @@ def _write_section(self, section):
    def __repr__(self):
        """Return a concise, debug-friendly representation of this scope."""
        return '<InternalConfigScope: %s>' % self.name
    def clear(self):
        """Do nothing: an InternalConfigScope is built from data already in
        memory, so there is no file-backed cache to invalidate."""
        # no cache to clear here.
        pass
@staticmethod
def _process_dict_keyname_overrides(data):
"""Turn a trailing `:' in a key name into an override attribute."""

View file

@@ -16,53 +16,37 @@
@pytest.fixture()
def job_parser():
    """Yield an argparse parser with the --jobs argument registered.

    --jobs needs to write to a command_line config scope, so that is the
    only scope we create; ``use_configuration`` tears it down on exit, so
    no manual post-yield cleanup is required.
    """
    p = argparse.ArgumentParser()
    arguments.add_common_arguments(p, ['jobs'])
    scopes = [spack.config.InternalConfigScope('command_line', {'config': {}})]
    with spack.config.use_configuration(*scopes):
        yield p
@pytest.fixture(params=[1, 2, 4, 8, 16, 32])
def ncores(monkeypatch, request):
    """Mocks having a machine with n cores for the purpose of
    computing config:build_jobs.
    """
    def _cpu_count():
        return request.param

    # Patch multiprocessing.cpu_count() to return the value we need
    monkeypatch.setattr(multiprocessing, 'cpu_count', _cpu_count)
    # Patch the configuration parts that have been cached already
    # NOTE(review): min(16, cores) mirrors the default build_jobs cap —
    # confirm against spack.config defaults.
    monkeypatch.setitem(spack.config.config_defaults['config'],
                        'build_jobs', min(16, request.param))
    monkeypatch.setitem(
        spack.config.config.scopes, '_builtin',
        spack.config.InternalConfigScope(
            '_builtin', spack.config.config_defaults
        ))
    return request.param
@pytest.mark.parametrize('cli_args,requested', [
(['-j', '24'], 24),
# Here we report the default if we have enough cores, as the cap
# on the available number of cores will be taken care of in the test
([], 16)
])
def test_setting_parallel_jobs(parser, cli_args, ncores, requested):
expected = min(requested, ncores)
namespace = parser.parse_args(cli_args)
@pytest.mark.parametrize("ncores", [1, 2, 4, 8, 16, 32])
def test_setting_jobs_flag(job_parser, ncores, monkeypatch):
    """An explicit -j value is honored, capped by the machine's core count."""
    monkeypatch.setattr(multiprocessing, 'cpu_count', lambda: ncores)
    expected = min(24, ncores)
    parsed = job_parser.parse_args(['-j', '24'])
    assert parsed.jobs == expected
    assert spack.config.get('config:build_jobs') == expected
def test_negative_integers_not_allowed_for_parallel_jobs(parser):
@pytest.mark.parametrize("ncores", [1, 2, 4, 8, 16, 32])
def test_omitted_job_flag(job_parser, ncores, monkeypatch):
    """Without -j, jobs falls back to min(cores, 16) and config stays unset."""
    monkeypatch.setattr(multiprocessing, 'cpu_count', lambda: ncores)
    parsed = job_parser.parse_args([])
    assert parsed.jobs == min(ncores, 16)
    assert spack.config.get('config:build_jobs') is None
def test_negative_integers_not_allowed_for_parallel_jobs(job_parser):
    """-j must reject non-positive values with a helpful error message."""
    with pytest.raises(ValueError) as exc_info:
        # Only the job_parser fixture exists now; the stale duplicate call to
        # the removed `parser` fixture (diff residue) is dropped.
        job_parser.parse_args(['-j', '-2'])
    assert 'expected a positive integer' in str(exc_info.value)

View file

@@ -1132,3 +1132,24 @@ def test_single_file_scope_cache_clearing(env_yaml):
after = scope.get_section('config')
assert after
assert before == after
@pytest.mark.regression('22611')
def test_internal_config_scope_cache_clearing():
    """
    An InternalConfigScope object is constructed from data that is already
    in memory, therefore it doesn't have any cache to clear. Here we ensure
    that calling the clear method is consistent with that.
    """
    data = {
        'config': {
            'build_jobs': 10
        }
    }
    internal_scope = spack.config.InternalConfigScope('internal', data)
    # Ensure that the initial object is properly set
    assert internal_scope.sections['config'] == data
    # Call the clear method
    internal_scope.clear()
    # Check that this didn't affect the scope object
    assert internal_scope.sections['config'] == data