From 13132c6b9785859d4edf9ca1b11e61abc3ad4b51 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 28 Apr 2015 23:15:13 -0700
Subject: [PATCH 01/25] Add YAML to Spack.
---
lib/spack/external/yaml/LICENSE | 19 +
lib/spack/external/yaml/README | 35 +
lib/spack/external/yaml/__init__.py | 315 +++++
lib/spack/external/yaml/composer.py | 139 +++
lib/spack/external/yaml/constructor.py | 678 +++++++++++
lib/spack/external/yaml/dumper.py | 62 +
lib/spack/external/yaml/emitter.py | 1140 ++++++++++++++++++
lib/spack/external/yaml/error.py | 75 ++
lib/spack/external/yaml/events.py | 86 ++
lib/spack/external/yaml/loader.py | 40 +
lib/spack/external/yaml/nodes.py | 49 +
lib/spack/external/yaml/parser.py | 589 ++++++++++
lib/spack/external/yaml/reader.py | 189 +++
lib/spack/external/yaml/representer.py | 484 ++++++++
lib/spack/external/yaml/resolver.py | 224 ++++
lib/spack/external/yaml/scanner.py | 1457 ++++++++++++++++++++++++
lib/spack/external/yaml/serializer.py | 111 ++
lib/spack/external/yaml/tokens.py | 104 ++
18 files changed, 5796 insertions(+)
create mode 100644 lib/spack/external/yaml/LICENSE
create mode 100644 lib/spack/external/yaml/README
create mode 100644 lib/spack/external/yaml/__init__.py
create mode 100644 lib/spack/external/yaml/composer.py
create mode 100644 lib/spack/external/yaml/constructor.py
create mode 100644 lib/spack/external/yaml/dumper.py
create mode 100644 lib/spack/external/yaml/emitter.py
create mode 100644 lib/spack/external/yaml/error.py
create mode 100644 lib/spack/external/yaml/events.py
create mode 100644 lib/spack/external/yaml/loader.py
create mode 100644 lib/spack/external/yaml/nodes.py
create mode 100644 lib/spack/external/yaml/parser.py
create mode 100644 lib/spack/external/yaml/reader.py
create mode 100644 lib/spack/external/yaml/representer.py
create mode 100644 lib/spack/external/yaml/resolver.py
create mode 100644 lib/spack/external/yaml/scanner.py
create mode 100644 lib/spack/external/yaml/serializer.py
create mode 100644 lib/spack/external/yaml/tokens.py
diff --git a/lib/spack/external/yaml/LICENSE b/lib/spack/external/yaml/LICENSE
new file mode 100644
index 0000000000..050ced23f6
--- /dev/null
+++ b/lib/spack/external/yaml/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/spack/external/yaml/README b/lib/spack/external/yaml/README
new file mode 100644
index 0000000000..c1edf13870
--- /dev/null
+++ b/lib/spack/external/yaml/README
@@ -0,0 +1,35 @@
+PyYAML - The next generation YAML parser and emitter for Python.
+
+To install, type 'python setup.py install'.
+
+By default, the setup.py script checks whether LibYAML is installed
+and if so, builds and installs LibYAML bindings. To skip the check
+and force installation of LibYAML bindings, use the option '--with-libyaml':
+'python setup.py --with-libyaml install'. To disable the check and
+skip building and installing LibYAML bindings, use '--without-libyaml':
+'python setup.py --without-libyaml install'.
+
+When LibYAML bindings are installed, you may use fast LibYAML-based
+parser and emitter as follows:
+
+ >>> yaml.load(stream, Loader=yaml.CLoader)
+ >>> yaml.dump(data, Dumper=yaml.CDumper)
+
+PyYAML includes a comprehensive test suite. To run the tests,
+type 'python setup.py test'.
+
+For more information, check the PyYAML homepage:
+'http://pyyaml.org/wiki/PyYAML'.
+
+For PyYAML tutorial and reference, see:
+'http://pyyaml.org/wiki/PyYAMLDocumentation'.
+
+Post your questions and opinions to the YAML-Core mailing list:
+'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
+
+Submit bug reports and feature requests to the PyYAML bug tracker:
+'http://pyyaml.org/newticket?component=pyyaml'.
+
+PyYAML is written by Kirill Simonov. It is released
+under the MIT license. See the file LICENSE for more details.
+
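When the optional LibYAML bindings are not built, the CLoader and CDumper
classes are absent; a common pattern is to fall back to the pure-Python
classes. A minimal sketch (assuming a file named config.yaml exists):

    import yaml

    # Prefer the fast LibYAML-based classes when the C bindings were built.
    try:
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        from yaml import Loader, Dumper

    with open('config.yaml') as f:
        data = yaml.load(f, Loader=Loader)      # parse the first document

    print(yaml.dump(data, Dumper=Dumper))       # dump back to a string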
diff --git a/lib/spack/external/yaml/__init__.py b/lib/spack/external/yaml/__init__.py
new file mode 100644
index 0000000000..f977f46ba7
--- /dev/null
+++ b/lib/spack/external/yaml/__init__.py
@@ -0,0 +1,315 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.10'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
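The functions above are thin wrappers that construct a Loader or Dumper,
drive it, and dispose of it. A short round-trip sketch using only the safe
variants (the document below is made up for illustration):

    import yaml

    doc = "packages:\n  - name: zlib\n    version: 1.2.8\n"

    data = yaml.safe_load(doc)
    # -> {'packages': [{'name': 'zlib', 'version': '1.2.8'}]}

    print(yaml.safe_dump(data, default_flow_style=False))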
diff --git a/lib/spack/external/yaml/composer.py b/lib/spack/external/yaml/composer.py
new file mode 100644
index 0000000000..06e5ac782f
--- /dev/null
+++ b/lib/spack/external/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurrence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
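The Composer assembles the event stream into a tree of ScalarNode,
SequenceNode, and MappingNode objects (see nodes.py); yaml.compose() in
__init__.py exposes it directly. A small sketch of walking that tree:

    import yaml

    root = yaml.compose('colors: [red, green]')
    print(root.tag)              # tag:yaml.org,2002:map
    key, value = root.value[0]   # a MappingNode holds a list of (key, value) node pairs
    print(key.value)             # colors
    print(value.tag)             # tag:yaml.org,2002:seq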
diff --git a/lib/spack/external/yaml/constructor.py b/lib/spack/external/yaml/constructor.py
new file mode 100644
index 0000000000..8c0ec181b2
--- /dev/null
+++ b/lib/spack/external/yaml/constructor.py
@@ -0,0 +1,678 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ if key in mapping:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found already in-use key (%s)" % key, key_node.start_mark)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created; check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
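Constructors are looked up by node tag, so applications can teach the loader
about their own tags through the add_constructor() hook exposed in
__init__.py. A sketch using an invented !spec tag and Spec class:

    import yaml

    class Spec(object):
        def __init__(self, text):
            self.text = text
        def __repr__(self):
            return 'Spec(%r)' % self.text

    def construct_spec(loader, node):
        # node is a ScalarNode; construct_scalar() returns its string value.
        return Spec(loader.construct_scalar(node))

    yaml.add_constructor(u'!spec', construct_spec)

    print(yaml.load('mpi: !spec mpich@3.0.4'))
    # {'mpi': Spec(u'mpich@3.0.4')}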
diff --git a/lib/spack/external/yaml/dumper.py b/lib/spack/external/yaml/dumper.py
new file mode 100644
index 0000000000..f811d2c919
--- /dev/null
+++ b/lib/spack/external/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
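Each dumper class is just a different stacking of Emitter, Serializer, a
Representer, and a Resolver; the output-side counterpart of add_constructor()
is add_representer(). A sketch mirroring the constructor example above (the
Spec class is again invented):

    import yaml

    class Spec(object):
        def __init__(self, text):
            self.text = text

    def represent_spec(dumper, data):
        # Emit the object as a scalar carrying the custom !spec tag.
        return dumper.represent_scalar(u'!spec', data.text)

    yaml.add_representer(Spec, represent_spec)

    print(yaml.dump({'mpi': Spec(u'mpich@3.0.4')}, default_flow_style=False))
    # roughly: mpi: !spec 'mpich@3.0.4'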
diff --git a/lib/spack/external/yaml/emitter.py b/lib/spack/external/yaml/emitter.py
new file mode 100644
index 0000000000..e5bcdcccbb
--- /dev/null
+++ b/lib/spack/external/yaml/emitter.py
@@ -0,0 +1,1140 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
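+#
+# As a rough illustration, a stream holding the single document
+#     - foo
+#     - bar: baz
+# corresponds to this event sequence (event classes are defined in events.py):
+#     StreamStart, DocumentStart, SequenceStart,
+#       Scalar('foo'), MappingStart, Scalar('bar'), Scalar('baz'), MappingEnd,
+#     SequenceEnd, DocumentEnd, StreamEnd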
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear within a scalar, either.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
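
The rules above feed choose_scalar_style() and ultimately decide how dump() renders
each value. A minimal sketch of the effect, using the package-level dump() helper
(the dict literal below is made up for illustration; exact quoting depends on the
scalar contents and the dumper options):

    import yaml

    # '- leading dash' starts with a block indicator, so plain style is
    # rejected and the emitter falls back to single quotes; 'plain text'
    # has no indicators and stays plain.
    print yaml.dump({'a': '- leading dash', 'b': 'plain text'},
                    default_flow_style=False)
    # a: '- leading dash'
    # b: plain text
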
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/lib/spack/external/yaml/error.py b/lib/spack/external/yaml/error.py
new file mode 100644
index 0000000000..577686db5f
--- /dev/null
+++ b/lib/spack/external/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/lib/spack/external/yaml/events.py b/lib/spack/external/yaml/events.py
new file mode 100644
index 0000000000..f79ad389cb
--- /dev/null
+++ b/lib/spack/external/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/lib/spack/external/yaml/loader.py b/lib/spack/external/yaml/loader.py
new file mode 100644
index 0000000000..293ff467b1
--- /dev/null
+++ b/lib/spack/external/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/lib/spack/external/yaml/nodes.py b/lib/spack/external/yaml/nodes.py
new file mode 100644
index 0000000000..c4f070c41e
--- /dev/null
+++ b/lib/spack/external/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = ''
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/lib/spack/external/yaml/parser.py b/lib/spack/external/yaml/parser.py
new file mode 100644
index 0000000000..f9e3057f33
--- /dev/null
+++ b/lib/spack/external/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
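
To make the grammar above concrete: the parser flattens a document into the event
stream defined in events.py. A rough sketch using the package-level parse() helper,
assuming the vendored package is importable as `yaml`; the input string is made up
and the event reprs are abbreviated:

    import yaml

    document = "compilers:\n  - gcc\n  - clang\n"
    for event in yaml.parse(document):
        print event
    # StreamStartEvent()
    # DocumentStartEvent()
    # MappingStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'compilers')
    # SequenceStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(..., value=u'gcc')
    # ScalarEvent(..., value=u'clang')
    # SequenceEndEvent()
    # MappingEndEvent()
    # DocumentEndEvent()
    # StreamEndEvent()
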
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+                        "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
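
A quick illustration of that rule with the package-level load() helper (the input
is made up; a single `key: value` pair inside a flow sequence loads as a one-entry
mapping):

    import yaml

    print yaml.load("[a: 1, b]")
    # [{'a': 1}, 'b']
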
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/lib/spack/external/yaml/reader.py b/lib/spack/external/yaml/reader.py
new file mode 100644
index 0000000000..a67af7c5da
--- /dev/null
+++ b/lib/spack/external/yaml/reader.py
@@ -0,0 +1,189 @@
+# This module contains abstractions for the input stream. You don't have to
+# look any further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the index of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
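
A minimal usage sketch of the interface described above (the input string is made
up; the attribute values follow from it):

    from StringIO import StringIO
    from yaml.reader import Reader

    reader = Reader(StringIO("foo: bar\n"))
    print reader.peek()     # u'f' -- look ahead without consuming
    print reader.prefix(3)  # u'foo'
    reader.forward(5)       # consume 'foo: '
    print reader.peek(), reader.index, reader.line, reader.column
    # b 5 0 5
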
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream, name=None):
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "" if name is None else name
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "" if name is None else name
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "") if name is None else name
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/lib/spack/external/yaml/representer.py b/lib/spack/external/yaml/representer.py
new file mode 100644
index 0000000000..5f4fc70dbc
--- /dev/null
+++ b/lib/spack/external/yaml/representer.py
@@ -0,0 +1,484 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
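+ # Illustrative example: an old-style instance with no __getinitargs__
+ # and __dict__ == {'x': 1} is represented as a mapping node tagged
+ # !!python/object:<module>.<class> whose value is {'x': 1}.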
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstruction, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
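+ # Illustrative example: for a plain new-style object, __reduce_ex__(2)
+ # typically returns something like
+ #     (copy_reg.__newobj__, (MyClass,), {'attr': 1}, None, None)
+ # (MyClass and 'attr' are hypothetical), which ends up as a
+ # !!python/object:<module>.MyClass mapping node holding the state dict.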
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/lib/spack/external/yaml/resolver.py b/lib/spack/external/yaml/resolver.py
new file mode 100644
index 0000000000..6b5ab87596
--- /dev/null
+++ b/lib/spack/external/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. Path elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value whose index is equal to `index_check`.
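+ # Illustrative example (using a hypothetical `MyResolver` subclass):
+ #     MyResolver.add_path_resolver(u'!version', [u'version'], kind=str)
+ # would resolve the scalar found under a top-level 'version' key to the
+ # explicit tag '!version'.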
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
diff --git a/lib/spack/external/yaml/scanner.py b/lib/spack/external/yaml/scanner.py
new file mode 100644
index 0000000000..5228fad65c
--- /dev/null
+++ b/lib/spack/external/yaml/scanner.py
@@ -0,0 +1,1457 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds a NUL character to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
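+ # For example, in the block scalar header '|+2' the chomping indicator
+ # is '+' (keep trailing breaks) and the indentation increment is 2.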
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark their beginning and end. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
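+ # Single-character escape sequences recognized in double-quoted scalars,
+ # e.g. '\n' for a line feed and '\L' for the Unicode line separator.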
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexadecimal digits, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/lib/spack/external/yaml/serializer.py b/lib/spack/external/yaml/serializer.py
new file mode 100644
index 0000000000..0bf1e96dc1
--- /dev/null
+++ b/lib/spack/external/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/lib/spack/external/yaml/tokens.py b/lib/spack/external/yaml/tokens.py
new file mode 100644
index 0000000000..4d0b48a394
--- /dev/null
+++ b/lib/spack/external/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
From 278e70e533587f734a9ccaa2131291734883efbb Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Wed, 29 Apr 2015 01:47:09 -0700
Subject: [PATCH 02/25] Specs to/from YAML are working.
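This adds Spec.to_node_dict()/to_yaml(), Spec.from_yaml(), and a dag_hash()
computed from the canonical YAML form. A minimal round-trip sketch
(illustrative only; 'mpileaks' is a sample package from the mock
repositories, and the calls assume a spec that concretizes cleanly):

    from spack.spec import Spec

    spec = Spec('mpileaks')
    spec.concretize()

    text = spec.to_yaml()            # serialize the whole DAG to YAML
    restored = Spec.from_yaml(text)  # rebuild the spec, dependencies included
    assert spec.eq_dag(restored)     # same DAG shape and node data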
---
lib/spack/external/__init__.py | 9 +++-
lib/spack/spack/spec.py | 95 ++++++++++++++++++++++++++++++++++
2 files changed, 102 insertions(+), 2 deletions(-)
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index 1cc981930a..0578022210 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -28,6 +28,11 @@
So far:
argparse: We include our own version to be Python 2.6 compatible.
- pyqver2: External script to query required python version of python source code.
- Used for ensuring 2.6 compatibility.
+
+ pyqver2: External script to query required python version of
+ python source code. Used for ensuring 2.6 compatibility.
+
+ functools: Used for implementation of total_ordering.
+
+ yaml: Used for config files.
"""
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index dffdccaddb..32970cdd33 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -95,6 +95,7 @@
import hashlib
from StringIO import StringIO
from operator import attrgetter
+from external import yaml
import llnl.util.tty as tty
from llnl.util.lang import *
@@ -578,6 +579,100 @@ def dep_hash(self, length=None):
return full_hash[:length]
+ def dag_hash(self, length=None):
+ """Return a hash of the entire spec DAG, including connectivity."""
+ sha = hashlib.sha1()
+ sha.update(self.to_node_yaml(canonical=True))
+ full_hash = sha.hexdigest()
+ return full_hash[:length]
+
+
+ def to_node_dict(self):
+ return {
+ self.name : {
+ 'versions': [str(v) for v in self.versions],
+ 'compiler' : None if self.compiler is None else {
+ 'name' : self.compiler.name,
+ 'versions': [str(v) for v in self.compiler.versions],
+ },
+ 'variants' : dict(
+ (name,v.enabled) for name, v in self.variants.items()),
+ 'arch' : self.architecture,
+ 'dependencies' : dict((d, self.dependencies[d].dag_hash())
+ for d in sorted(self.dependencies))
+ }}
+
+
+ def to_node_yaml(self, **kwargs):
+ """Return spec's DAG in minimal YAML (only immediate descendents)."""
+ canonical = kwargs.pop('canonical', False)
+ check_kwargs(kwargs, self.to_yaml)
+ if canonical:
+ return yaml.dump(self.to_node_dict(),
+ default_flow_style=True, width=sys.maxint)
+ else:
+ return yaml.dump(self.to_node_dict(),
+ default_flow_style=False)
+
+
+ def to_dict(self):
+ return {
+ 'dag' : [s.to_node_dict() for s in self.traverse(order='pre')],
+ 'hash' : self.dag_hash()
+ }
+
+
+ def to_yaml(self):
+ return yaml.dump(self.to_dict(), default_flow_style=False)
+
+
+ @staticmethod
+ def from_node_dict(node):
+ name = next(iter(node))
+ node = node[name]
+
+ spec = Spec(name)
+ spec.versions = VersionList(node['versions'])
+ compiler = node['compiler']
+ spec.architecture = node['arch']
+
+ if compiler is None:
+ spec.compiler = None
+ else:
+ spec.compiler = CompilerSpec(compiler['name'], compiler['versions'])
+
+ for name, enabled in node['variants'].items():
+ spec.variants[name] = Variant(name, enabled)
+
+ return spec
+
+
+ @staticmethod
+ def from_yaml(string):
+ """Construct a spec from YAML.
+
+ TODO: currently discards hashes. Include hashes when they
+ represent more than the DAG does.
+
+ """
+ deps = {}
+ spec = None
+
+ yfile = yaml.load(string)
+ for node in yfile['dag']:
+ name = next(iter(node))
+ dep = Spec.from_node_dict(node)
+ if not spec:
+ spec = dep
+ deps[dep.name] = dep
+
+ for node in yfile['dag']:
+ name = next(iter(node))
+ for dep_name in node[name]['dependencies']:
+ deps[name].dependencies[dep_name] = deps[dep_name]
+ return spec
+
+
def _concretize_helper(self, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
From 53e8e44a8b63e615b0d2baa74b4e51fc551365c1 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 5 May 2015 13:06:41 -0700
Subject: [PATCH 03/25] Make YAML specs more human readable.
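Roughly what one node in the emitted YAML looks like after this change
(package name, versions, compiler, and hashes below are invented and
shortened; a concrete spec emits 'version', an ambiguous one a 'versions'
list):

    spec:
    - mpileaks:
        version: '2.3'
        variants:
          debug: true
        arch: linux-x86_64
        compiler:
          name: gcc
          version: '4.9.2'
        dependencies:
          mpich: 1e4b7aa...
        hash: 9c3d2f0...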
---
lib/spack/spack/spec.py | 80 +++++++++++++++++++-------------------
lib/spack/spack/version.py | 19 +++++++++
2 files changed, 58 insertions(+), 41 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 32970cdd33..72a95de79e 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -256,6 +256,18 @@ def _cmp_key(self):
return (self.name, self.versions)
+ def to_dict(self):
+ d = {'name' : self.name}
+ d.update(self.versions.to_dict())
+ return { 'compiler' : d }
+
+
+ @staticmethod
+ def from_dict(d):
+ d = d['compiler']
+ return CompilerSpec(d['name'], VersionList.from_dict(d))
+
+
def __str__(self):
out = self.name
if self.versions and self.versions != _any_version:
@@ -582,48 +594,35 @@ def dep_hash(self, length=None):
def dag_hash(self, length=None):
"""Return a hash of the entire spec DAG, including connectivity."""
sha = hashlib.sha1()
- sha.update(self.to_node_yaml(canonical=True))
- full_hash = sha.hexdigest()
- return full_hash[:length]
+ hash_text = yaml.dump(
+ self.to_node_dict(), default_flow_style=True, width=sys.maxint)
+ sha.update(hash_text)
+ return sha.hexdigest()[:length]
def to_node_dict(self):
- return {
- self.name : {
- 'versions': [str(v) for v in self.versions],
- 'compiler' : None if self.compiler is None else {
- 'name' : self.compiler.name,
- 'versions': [str(v) for v in self.compiler.versions],
- },
- 'variants' : dict(
- (name,v.enabled) for name, v in self.variants.items()),
- 'arch' : self.architecture,
- 'dependencies' : dict((d, self.dependencies[d].dag_hash())
- for d in sorted(self.dependencies))
- }}
-
-
- def to_node_yaml(self, **kwargs):
- """Return spec's DAG in minimal YAML (only immediate descendents)."""
- canonical = kwargs.pop('canonical', False)
- check_kwargs(kwargs, self.to_yaml)
- if canonical:
- return yaml.dump(self.to_node_dict(),
- default_flow_style=True, width=sys.maxint)
- else:
- return yaml.dump(self.to_node_dict(),
- default_flow_style=False)
-
-
- def to_dict(self):
- return {
- 'dag' : [s.to_node_dict() for s in self.traverse(order='pre')],
- 'hash' : self.dag_hash()
+ d = {
+ 'variants' : dict(
+ (name,v.enabled) for name, v in self.variants.items()),
+ 'arch' : self.architecture,
+ 'dependencies' : dict((d, self.dependencies[d].dag_hash())
+ for d in sorted(self.dependencies))
}
+ if self.compiler:
+ d.update(self.compiler.to_dict())
+ else:
+ d['compiler'] = None
+ d.update(self.versions.to_dict())
+ return { self.name : d }
def to_yaml(self):
- return yaml.dump(self.to_dict(), default_flow_style=False)
+ node_list = []
+ for s in self.traverse(order='pre'):
+ node = s.to_node_dict()
+ node[s.name]['hash'] = s.dag_hash()
+ node_list.append(node)
+ return yaml.dump({ 'spec' : node_list }, default_flow_style=False)
@staticmethod
@@ -632,14 +631,13 @@ def from_node_dict(node):
node = node[name]
spec = Spec(name)
- spec.versions = VersionList(node['versions'])
- compiler = node['compiler']
+ spec.versions = VersionList.from_dict(node)
spec.architecture = node['arch']
- if compiler is None:
+ if node['compiler'] is None:
spec.compiler = None
else:
- spec.compiler = CompilerSpec(compiler['name'], compiler['versions'])
+ spec.compiler = CompilerSpec.from_dict(node)
for name, enabled in node['variants'].items():
spec.variants[name] = Variant(name, enabled)
@@ -659,14 +657,14 @@ def from_yaml(string):
spec = None
yfile = yaml.load(string)
- for node in yfile['dag']:
+ for node in yfile['spec']:
name = next(iter(node))
dep = Spec.from_node_dict(node)
if not spec:
spec = dep
deps[dep.name] = dep
- for node in yfile['dag']:
+ for node in yfile['spec']:
name = next(iter(node))
for dep_name in node[name]['dependencies']:
deps[name].dependencies[dep_name] = deps[dep_name]
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index cc83634137..908577122a 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -587,6 +587,25 @@ def overlaps(self, other):
return False
+ def to_dict(self):
+ """Generate human-readable dict for YAML."""
+ if self.concrete:
+ return { 'version' : str(self[0]) }
+ else:
+ return { 'versions' : str(v) for v in self }
+
+
+ @staticmethod
+ def from_dict(dictionary):
+ """Parse dict from to_dict."""
+ if 'versions' in dictionary:
+ return VersionList(dictionary['versions'])
+ elif 'version' in dictionary:
+ return VersionList([dictionary['version']])
+ else:
+ raise ValueError("Dict must have 'version' or 'versions' in it.")
+
+
@coerced
def satisfies(self, other):
"""A VersionList satisfies another if some version in the list would
From d687962b746c1b8e10789799ba20e1f2ea01407f Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 5 May 2015 13:07:02 -0700
Subject: [PATCH 04/25] Add test for YAML specs.
---
lib/spack/spack/test/__init__.py | 3 +-
lib/spack/spack/test/spec_yaml.py | 65 +++++++++++++++++++++++++++++++
2 files changed, 67 insertions(+), 1 deletion(-)
create mode 100644 lib/spack/spack/test/spec_yaml.py
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index c53e6774fc..77c8bd3191 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -52,7 +52,8 @@
'mirror',
'url_extrapolate',
'cc',
- 'link_tree']
+ 'link_tree',
+ 'spec_yaml']
def list_tests():
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
new file mode 100644
index 0000000000..b1339b6da3
--- /dev/null
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -0,0 +1,65 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Test YAML serialization for specs.
+
+The YAML format preserves DAG information in the spec.
+
+"""
+from spack.spec import Spec
+from spack.test.mock_packages_test import *
+
+class SpecDagTest(MockPackagesTest):
+
+ def check_yaml_round_trip(self, spec):
+ yaml_text = spec.to_yaml()
+ spec_from_yaml = Spec.from_yaml(yaml_text)
+ self.assertTrue(spec.eq_dag(spec_from_yaml))
+
+
+ def test_simple_spec(self):
+ spec = Spec('mpileaks')
+ self.check_yaml_round_trip(spec)
+
+
+ def test_normal_spec(self):
+ spec = Spec('mpileaks+debug~opt')
+ spec.normalize()
+ self.check_yaml_round_trip(spec)
+
+
+ def test_concrete_spec(self):
+ spec = Spec('mpileaks+debug~opt')
+ spec.concretize()
+ self.check_yaml_round_trip(spec)
+
+
+ def test_yaml_subdag(self):
+ spec = Spec('mpileaks^mpich+debug~opt')
+ spec.concretize()
+
+ yaml_spec = Spec.from_yaml(spec.to_yaml())
+
+ for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):
+ self.assertTrue(spec[dep].eq_dag(yaml_spec[dep]))
From 1d0975bac676c607b6e4c18edcdf683e11b4f457 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 5 May 2015 14:24:46 -0700
Subject: [PATCH 05/25] Bugfixes for yaml specs.
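The version.py change fixes a silent data-loss bug: the old dict
comprehension reused the constant key 'versions', so only the last version
survived. Hypothetical values to show the difference:

    vs = ['1.0:5.0', '6.1', '7.3']
    { 'versions' : str(v) for v in vs }      # -> {'versions': '7.3'}  (data lost)
    { 'versions' : [str(v) for v in vs] }    # -> {'versions': ['1.0:5.0', '6.1', '7.3']}

YAML parse failures in Spec.from_yaml() are now also wrapped in a
SpackYAMLError instead of leaking a raw MarkedYAMLError.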
---
lib/spack/spack/spec.py | 13 +++++++++++--
lib/spack/spack/test/spec_yaml.py | 6 ++++++
lib/spack/spack/version.py | 2 +-
3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 72a95de79e..dd9ec5dbe3 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
@@ -96,6 +96,7 @@
from StringIO import StringIO
from operator import attrgetter
from external import yaml
+from external.yaml.error import MarkedYAMLError
import llnl.util.tty as tty
from llnl.util.lang import *
@@ -656,7 +657,11 @@ def from_yaml(string):
deps = {}
spec = None
- yfile = yaml.load(string)
+ try:
+ yfile = yaml.load(string)
+ except MarkedYAMLError, e:
+ raise SpackYAMLError("error parsing YMAL spec:", str(e))
+
for node in yfile['spec']:
name = next(iter(node))
dep = Spec.from_node_dict(node)
@@ -1776,3 +1781,7 @@ class UnsatisfiableDependencySpecError(UnsatisfiableSpecError):
def __init__(self, provided, required):
super(UnsatisfiableDependencySpecError, self).__init__(
provided, required, "dependency")
+
+class SpackYAMLError(spack.error.SpackError):
+ def __init__(self, msg, yaml_error):
+ super(SpackYAMLError, self).__init__(msg, str(yaml_error))
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
index b1339b6da3..74c957827e 100644
--- a/lib/spack/spack/test/spec_yaml.py
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -49,6 +49,12 @@ def test_normal_spec(self):
self.check_yaml_round_trip(spec)
+ def test_ambiguous_version_spec(self):
+ spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
+ spec.normalize()
+ self.check_yaml_round_trip(spec)
+
+
def test_concrete_spec(self):
spec = Spec('mpileaks+debug~opt')
spec.concretize()
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index 908577122a..61b1e328ce 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -592,7 +592,7 @@ def to_dict(self):
if self.concrete:
return { 'version' : str(self[0]) }
else:
- return { 'versions' : str(v) for v in self }
+ return { 'versions' : [str(v) for v in self] }
@staticmethod
From 9412fc8083ab8607974afdeca04aa000b115a1ed Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sat, 9 May 2015 16:12:04 -0500
Subject: [PATCH 06/25] restore some disabled git tests.
---
lib/spack/spack/test/git_fetch.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py
index 04422adb57..da57f87bd0 100644
--- a/lib/spack/spack/test/git_fetch.py
+++ b/lib/spack/spack/test/git_fetch.py
@@ -109,7 +109,7 @@ def test_fetch_master(self):
})
- def ztest_fetch_branch(self):
+ def test_fetch_branch(self):
"""Test fetching a branch."""
self.try_fetch(self.repo.branch, self.repo.branch_file, {
'git' : self.repo.path,
@@ -117,7 +117,7 @@ def ztest_fetch_branch(self):
})
- def ztest_fetch_tag(self):
+ def test_fetch_tag(self):
"""Test fetching a tag."""
self.try_fetch(self.repo.tag, self.repo.tag_file, {
'git' : self.repo.path,
@@ -125,7 +125,7 @@ def ztest_fetch_tag(self):
})
- def ztest_fetch_commit(self):
+ def test_fetch_commit(self):
"""Test fetching a particular commit."""
self.try_fetch(self.repo.r1, self.repo.r1_file, {
'git' : self.repo.path,
From 2f3b0481def800fb1e3009bf878ba28d82412453 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sat, 9 May 2015 16:32:57 -0500
Subject: [PATCH 07/25] YamlDirectoryLayout now working.
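With YamlDirectoryLayout, install prefixes are keyed by the (base32-encoded)
DAG hash and only enabled variants appear in the directory name, with spec
metadata kept inside the prefix. Roughly (hypothetical spec; hash shortened
for readability):

    opt/spack/
        linux-x86_64/
            gcc-4.9.2/
                mpileaks-2.3-debug-7ba5lqs/
                    .spack/
                        spec        # full spec DAG written as YAML by write_spec()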
---
lib/spack/spack/__init__.py | 7 +-
lib/spack/spack/directory_layout.py | 153 +++++++++--------------
lib/spack/spack/package.py | 6 +-
lib/spack/spack/spec.py | 24 +---
lib/spack/spack/test/directory_layout.py | 52 +++++---
lib/spack/spack/test/install.py | 4 +-
6 files changed, 113 insertions(+), 133 deletions(-)
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index eb891e3d57..80fc0390f9 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -42,7 +42,8 @@
hooks_path = join_path(module_path, "hooks")
var_path = join_path(prefix, "var", "spack")
stage_path = join_path(var_path, "stage")
-install_path = join_path(prefix, "opt")
+opt_path = join_path(prefix, "opt")
+install_path = join_path(opt_path, "spack")
share_path = join_path(prefix, "share", "spack")
#
@@ -65,8 +66,8 @@
# This controls how spack lays out install prefixes and
# stage directories.
#
-from spack.directory_layout import SpecHashDirectoryLayout
-install_layout = SpecHashDirectoryLayout(install_path)
+from spack.directory_layout import YamlDirectoryLayout
+install_layout = YamlDirectoryLayout(install_path)
#
# This controls how things are concretized in spack.
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index b2cf5dc801..67708c47b5 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -27,8 +27,9 @@
import exceptions
import hashlib
import shutil
+import glob
import tempfile
-from contextlib import closing
+from external import yaml
import llnl.util.tty as tty
from llnl.util.lang import memoized
@@ -81,7 +82,7 @@ def relative_path_for_spec(self, spec):
raise NotImplementedError()
- def make_path_for_spec(self, spec):
+ def create_install_directory(self, spec):
"""Creates the installation directory for a spec."""
raise NotImplementedError()
@@ -131,7 +132,7 @@ def path_for_spec(self, spec):
return os.path.join(self.root, path)
- def remove_path_for_spec(self, spec):
+ def remove_install_directory(self, spec):
"""Removes a prefix and any empty parent directories from the root.
Raised RemoveFailedError if something goes wrong.
"""
@@ -153,94 +154,70 @@ def remove_path_for_spec(self, spec):
path = os.path.dirname(path)
-def traverse_dirs_at_depth(root, depth, path_tuple=(), curdepth=0):
- """For each directory at within , return a tuple representing
- the ancestors of that directory.
- """
- if curdepth == depth and curdepth != 0:
- yield path_tuple
- elif depth > curdepth:
- for filename in os.listdir(root):
- child = os.path.join(root, filename)
- if os.path.isdir(child):
- child_tuple = path_tuple + (filename,)
- for tup in traverse_dirs_at_depth(
- child, depth, child_tuple, curdepth+1):
- yield tup
-
-
-class SpecHashDirectoryLayout(DirectoryLayout):
+class YamlDirectoryLayout(DirectoryLayout):
"""Lays out installation directories like this::
- <install root>/
+ <install root>/
<architecture>/
- <compiler>/
- name@version+variant-<dependency_hash>
+ <compiler>-<compiler version>/
+ <name>-<version>-<variants>-<hash>
- Where dependency_hash is a SHA-1 hash prefix for the full package spec.
- This accounts for dependencies.
+ The hash here is a SHA-1 hash for the full DAG plus the build
+ spec. TODO: implement the build spec.
- If there is ever a hash collision, you won't be able to install a new
- package unless you use a larger prefix. However, the full spec is stored
- in a file called .spec in each directory, so you can migrate an entire
- install directory to a new hash size pretty easily.
-
- TODO: make a tool to migrate install directories to different hash sizes.
+ To avoid special characters (like ~) in the directory name,
+ only enabled variants are included in the install path.
+ Disabled variants are omitted.
"""
def __init__(self, root, **kwargs):
- """Prefix size is number of characters in the SHA-1 prefix to use
- to make each hash unique.
- """
- spec_file_name = kwargs.get('spec_file_name', '.spec')
- extension_file_name = kwargs.get('extension_file_name', '.extensions')
- super(SpecHashDirectoryLayout, self).__init__(root)
- self.spec_file_name = spec_file_name
- self.extension_file_name = extension_file_name
+ super(YamlDirectoryLayout, self).__init__(root)
+ self.metadata_dir = kwargs.get('metadata_dir', '.spack')
+ self.hash_len = kwargs.get('hash_len', None)
+
+ self.spec_file_name = 'spec'
+ self.extension_file_name = 'extensions'
# Cache of already written/read extension maps.
self._extension_maps = {}
@property
def hidden_file_paths(self):
- return ('.spec', '.extensions')
+ return (self.metadata_dir)
def relative_path_for_spec(self, spec):
_check_concrete(spec)
- dir_name = spec.format('$_$@$+$#')
- return join_path(spec.architecture, spec.compiler, dir_name)
+ enabled_variants = (
+ '-' + v.name for v in spec.variants.values()
+ if v.enabled)
+
+ dir_name = "%s-%s%s-%s" % (
+ spec.name,
+ spec.version,
+ ''.join(enabled_variants),
+ spec.dag_hash(self.hash_len))
+
+ path = join_path(
+ spec.architecture,
+ "%s-%s" % (spec.compiler.name, spec.compiler.version),
+ dir_name)
+
+ return path
def write_spec(self, spec, path):
"""Write a spec out to a file."""
- with closing(open(path, 'w')) as spec_file:
- spec_file.write(spec.tree(ids=False, cover='nodes'))
+ _check_concrete(spec)
+ with open(path, 'w') as f:
+ f.write(spec.to_yaml())
def read_spec(self, path):
"""Read the contents of a file and parse them as a spec"""
- with closing(open(path)) as spec_file:
- # Specs from files are assumed normal and concrete
- spec = Spec(spec_file.read().replace('\n', ''))
+ with open(path) as f:
+ yaml_text = f.read()
+ spec = Spec.from_yaml(yaml_text)
- if all(spack.db.exists(s.name) for s in spec.traverse()):
- copy = spec.copy()
-
- # TODO: It takes a lot of time to normalize every spec on read.
- # TODO: Storing graph info with spec files would fix this.
- copy.normalize()
- if copy.concrete:
- return copy # These are specs spack still understands.
-
- # If we get here, either the spec is no longer in spack, or
- # something about its dependencies has changed. So we need to
- # just assume the read spec is correct. We'll lose graph
- # information if we do this, but this is just for best effort
- # for commands like uninstall and find. Currently Spack
- # doesn't do anything that needs the graph info after install.
-
- # TODO: store specs with full connectivity information, so
- # that we don't have to normalize or reconstruct based on
- # changing dependencies in the Spack tree.
+ # Specs read from actual installations are always concrete
spec._normal = True
spec._concrete = True
return spec
@@ -249,10 +226,14 @@ def read_spec(self, path):
def spec_file_path(self, spec):
"""Gets full path to spec file"""
_check_concrete(spec)
- return join_path(self.path_for_spec(spec), self.spec_file_name)
+ return join_path(self.metadata_path(spec), self.spec_file_name)
- def make_path_for_spec(self, spec):
+ def metadata_path(self, spec):
+ return join_path(self.path_for_spec(spec), self.metadata_dir)
+
+
+ def create_install_directory(self, spec):
_check_concrete(spec)
path = self.path_for_spec(spec)
@@ -267,16 +248,13 @@ def make_path_for_spec(self, spec):
if installed_spec == self.spec:
raise InstallDirectoryAlreadyExistsError(path)
- spec_hash = self.hash_spec(spec)
- installed_hash = self.hash_spec(installed_spec)
- if installed_spec == spec_hash:
+ if spec.dag_hash() == installed_spec.dag_hash():
raise SpecHashCollisionError(installed_hash, spec_hash)
else:
raise InconsistentInstallDirectoryError(
- 'Spec file in %s does not match SHA-1 hash!'
- % spec_file_path)
+ 'Spec file in %s does not match hash!' % spec_file_path)
- mkdirp(path)
+ mkdirp(self.metadata_path(spec))
self.write_spec(spec, spec_file_path)
@@ -284,22 +262,14 @@ def make_path_for_spec(self, spec):
def all_specs(self):
if not os.path.isdir(self.root):
return []
-
- specs = []
- for path in traverse_dirs_at_depth(self.root, 3):
- arch, compiler, last_dir = path
- spec_file_path = join_path(
- self.root, arch, compiler, last_dir, self.spec_file_name)
- if os.path.exists(spec_file_path):
- spec = self.read_spec(spec_file_path)
- specs.append(spec)
- return specs
+ spec_files = glob.glob("%s/*/*/*/.spack/spec" % self.root)
+ return [self.read_spec(s) for s in spec_files]
def extension_file_path(self, spec):
"""Gets full path to an installed package's extension file"""
_check_concrete(spec)
- return join_path(self.path_for_spec(spec), self.extension_file_name)
+ return join_path(self.metadata_path(spec), self.extension_file_name)
def _extension_map(self, spec):
@@ -314,7 +284,7 @@ def _extension_map(self, spec):
else:
exts = {}
- with closing(open(path)) as ext_file:
+ with open(path) as ext_file:
for line in ext_file:
try:
spec = Spec(line.strip())
@@ -358,7 +328,7 @@ def _write_extensions(self, spec, extensions):
prefix=basename, dir=dirname, delete=False)
# Write temp file.
- with closing(tmp):
+ with tmp:
for extension in sorted(extensions.values()):
tmp.write("%s\n" % extension)
@@ -392,6 +362,7 @@ def remove_extension(self, spec, ext_spec):
self._write_extensions(spec, exts)
+
class DirectoryLayoutError(SpackError):
"""Superclass for directory layout errors."""
def __init__(self, message):
@@ -399,9 +370,9 @@ def __init__(self, message):
class SpecHashCollisionError(DirectoryLayoutError):
- """Raised when there is a hash collision in an SpecHashDirectoryLayout."""
+ """Raised when there is a hash collision in an install layout."""
def __init__(self, installed_spec, new_spec):
- super(SpecHashDirectoryLayout, self).__init__(
+ super(SpecHashCollisionError, self).__init__(
'Specs %s and %s have the same SHA-1 prefix!'
% installed_spec, new_spec)
@@ -422,7 +393,7 @@ def __init__(self, message):
class InstallDirectoryAlreadyExistsError(DirectoryLayoutError):
- """Raised when make_path_for_sec is called unnecessarily."""
+ """Raised when create_install_directory is called unnecessarily."""
def __init__(self, path):
super(InstallDirectoryAlreadyExistsError, self).__init__(
"Install path %s already exists!")
@@ -455,5 +426,3 @@ def __init__(self, spec, ext_spec):
super(NoSuchExtensionError, self).__init__(
"%s cannot be removed from %s because it's not activated."% (
ext_spec.short_spec, spec.short_spec))
-
-
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 908fd86a87..bbb64cd05b 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -658,7 +658,7 @@ def url_version(self, version):
def remove_prefix(self):
"""Removes the prefix for a package along with any empty parent directories."""
- spack.install_layout.remove_path_for_spec(self.spec)
+ spack.install_layout.remove_install_directory(self.spec)
def do_fetch(self):
@@ -810,7 +810,7 @@ def do_install(self, **kwargs):
# create the install directory. The install layout
# handles this in case so that it can use whatever
# package naming scheme it likes.
- spack.install_layout.make_path_for_spec(self.spec)
+ spack.install_layout.create_install_directory(self.spec)
def cleanup():
if not keep_prefix:
@@ -831,11 +831,11 @@ def real_work():
spack.hooks.pre_install(self)
# Set up process's build environment before running install.
- self.stage.chdir_to_source()
if fake_install:
self.do_fake_install()
else:
# Subclasses implement install() to do the real work.
+ self.stage.chdir_to_source()
self.install(self.spec, self.prefix)
# Ensure that something was actually installed.
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index dd9ec5dbe3..e36a1d6f0b 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -93,6 +93,7 @@
import sys
import itertools
import hashlib
+import base64
from StringIO import StringIO
from operator import attrgetter
from external import yaml
@@ -578,27 +579,12 @@ def prefix(self):
return Prefix(spack.install_layout.path_for_spec(self))
- def dep_hash(self, length=None):
- """Return a hash representing all dependencies of this spec
- (direct and indirect).
-
- If you want this hash to be consistent, you should
- concretize the spec first so that it is not ambiguous.
- """
- sha = hashlib.sha1()
- sha.update(self.dep_string())
- full_hash = sha.hexdigest()
-
- return full_hash[:length]
-
-
def dag_hash(self, length=None):
"""Return a hash of the entire spec DAG, including connectivity."""
- sha = hashlib.sha1()
- hash_text = yaml.dump(
+ yaml_text = yaml.dump(
self.to_node_dict(), default_flow_style=True, width=sys.maxint)
- sha.update(hash_text)
- return sha.hexdigest()[:length]
+ sha = hashlib.sha1(yaml_text)
+ return base64.b32encode(sha.digest()).lower()[:length]
def to_node_dict(self):
@@ -1363,7 +1349,7 @@ def write(s, c):
write(fmt % (c + str(self.architecture)), c)
elif c == '#':
if self.dependencies:
- out.write(fmt % ('-' + self.dep_hash(8)))
+ out.write(fmt % ('-' + self.dag_hash(8)))
elif c == '$':
if fmt != '':
raise ValueError("Can't use format width with $$.")
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index 3e52954cfe..34374628be 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -36,7 +36,11 @@
import spack
from spack.spec import Spec
from spack.packages import PackageDB
-from spack.directory_layout import SpecHashDirectoryLayout
+from spack.directory_layout import YamlDirectoryLayout
+
+# number of packages to test (to reduce test time)
+max_packages = 10
+
class DirectoryLayoutTest(unittest.TestCase):
"""Tests that a directory layout works correctly and produces a
@@ -44,11 +48,11 @@ class DirectoryLayoutTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
- self.layout = SpecHashDirectoryLayout(self.tmpdir)
+ self.layout = YamlDirectoryLayout(self.tmpdir)
def tearDown(self):
- shutil.rmtree(self.tmpdir, ignore_errors=True)
+ #shutil.rmtree(self.tmpdir, ignore_errors=True)
self.layout = None
@@ -59,7 +63,9 @@ def test_read_and_write_spec(self):
finally that the directory can be removed by the directory
layout.
"""
- for pkg in spack.db.all_packages():
+ packages = list(spack.db.all_packages())[:max_packages]
+
+ for pkg in packages:
spec = pkg.spec
# If a spec fails to concretize, just skip it. If it is a
@@ -69,7 +75,7 @@ def test_read_and_write_spec(self):
except:
continue
- self.layout.make_path_for_spec(spec)
+ self.layout.create_install_directory(spec)
install_dir = self.layout.path_for_spec(spec)
spec_path = self.layout.spec_file_path(spec)
@@ -90,7 +96,7 @@ def test_read_and_write_spec(self):
# Ensure that specs that come out "normal" are really normal.
with closing(open(spec_path)) as spec_file:
- read_separately = Spec(spec_file.read())
+ read_separately = Spec.from_yaml(spec_file.read())
read_separately.normalize()
self.assertEqual(read_separately, spec_from_file)
@@ -98,11 +104,11 @@ def test_read_and_write_spec(self):
read_separately.concretize()
self.assertEqual(read_separately, spec_from_file)
- # Make sure the dep hash of the read-in spec is the same
- self.assertEqual(spec.dep_hash(), spec_from_file.dep_hash())
+ # Make sure the hash of the read-in spec is the same
+ self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
# Ensure directories are properly removed
- self.layout.remove_path_for_spec(spec)
+ self.layout.remove_install_directory(spec)
self.assertFalse(os.path.isdir(install_dir))
self.assertFalse(os.path.exists(install_dir))
@@ -120,12 +126,14 @@ def test_handle_unknown_package(self):
"""
mock_db = PackageDB(spack.mock_packages_path)
- not_in_mock = set(spack.db.all_package_names()).difference(
+ not_in_mock = set.difference(
+ set(spack.db.all_package_names()),
set(mock_db.all_package_names()))
+ packages = list(not_in_mock)[:max_packages]
# Create all the packages that are not in mock.
installed_specs = {}
- for pkg_name in not_in_mock:
+ for pkg_name in packages:
spec = spack.db.get(pkg_name).spec
# If a spec fails to concretize, just skip it. If it is a
@@ -135,7 +143,7 @@ def test_handle_unknown_package(self):
except:
continue
- self.layout.make_path_for_spec(spec)
+ self.layout.create_install_directory(spec)
installed_specs[spec] = self.layout.path_for_spec(spec)
tmp = spack.db
@@ -144,12 +152,28 @@ def test_handle_unknown_package(self):
# Now check that even without the package files, we know
# enough to read a spec from the spec file.
for spec, path in installed_specs.items():
- spec_from_file = self.layout.read_spec(join_path(path, '.spec'))
+ spec_from_file = self.layout.read_spec(join_path(path, '.spack', 'spec'))
# To satisfy these conditions, directory layouts need to
# read in concrete specs from their install dirs somehow.
self.assertEqual(path, self.layout.path_for_spec(spec_from_file))
self.assertEqual(spec, spec_from_file)
- self.assertEqual(spec.dep_hash(), spec_from_file.dep_hash())
+ self.assertTrue(spec.eq_dag(spec_from_file))
+ self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
spack.db = tmp
+
+
+ def test_find(self):
+ """Test that finding specs within an install layout works."""
+ packages = list(spack.db.all_packages())[:max_packages]
+ installed_specs = {}
+ for pkg in packages:
+ spec = pkg.spec.concretized()
+ installed_specs[spec.name] = spec
+ self.layout.create_install_directory(spec)
+
+ found_specs = dict((s.name, s) for s in self.layout.all_specs())
+ for name, spec in found_specs.items():
+ self.assertTrue(name in found_specs)
+ self.assertTrue(found_specs[name].eq_dag(spec))
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index e052f53e77..d240a393a6 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -33,7 +33,7 @@
import spack
from spack.stage import Stage
from spack.fetch_strategy import URLFetchStrategy
-from spack.directory_layout import SpecHashDirectoryLayout
+from spack.directory_layout import YamlDirectoryLayout
from spack.util.executable import which
from spack.test.mock_packages_test import *
from spack.test.mock_repo import MockArchive
@@ -55,7 +55,7 @@ def setUp(self):
# installed pkgs and mock packages.
self.tmpdir = tempfile.mkdtemp()
self.orig_layout = spack.install_layout
- spack.install_layout = SpecHashDirectoryLayout(self.tmpdir)
+ spack.install_layout = YamlDirectoryLayout(self.tmpdir)
def tearDown(self):
From 29e833dfefe47fa82d3115d23299921643997fbd Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 02:56:50 -0700
Subject: [PATCH 08/25] extensions file now in YAML format
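A package's extensions are now recorded in .spack/extensions.yaml rather
than as a flat list of spec strings. The file _write_extensions() produces
looks roughly like this (extension name, hash, and path are invented for
illustration):

    extensions:
    - py-setuptools:
        hash: 7ba5lqsdkkpbhmnwtcytfhcgspjm3kdg
        path: /opt/spack/linux-x86_64/gcc-4.9.2/py-setuptools-18.1-7ba5lqs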
---
lib/spack/spack/directory_layout.py | 92 +++++++++++++++---------
lib/spack/spack/spec.py | 12 ++--
lib/spack/spack/test/directory_layout.py | 3 +-
3 files changed, 69 insertions(+), 38 deletions(-)
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index 67708c47b5..c2e2ea4deb 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -173,12 +173,13 @@ def __init__(self, root, **kwargs):
self.metadata_dir = kwargs.get('metadata_dir', '.spack')
self.hash_len = kwargs.get('hash_len', None)
- self.spec_file_name = 'spec'
- self.extension_file_name = 'extensions'
+ self.spec_file_name = 'spec.yaml'
+ self.extension_file_name = 'extensions.yaml'
# Cache of already written/read extension maps.
self._extension_maps = {}
+
@property
def hidden_file_paths(self):
return (self.metadata_dir)
@@ -208,14 +209,13 @@ def write_spec(self, spec, path):
"""Write a spec out to a file."""
_check_concrete(spec)
with open(path, 'w') as f:
- f.write(spec.to_yaml())
+ spec.to_yaml(f)
def read_spec(self, path):
"""Read the contents of a file and parse them as a spec"""
with open(path) as f:
- yaml_text = f.read()
- spec = Spec.from_yaml(yaml_text)
+ spec = Spec.from_yaml(f)
# Specs read from actual installations are always concrete
spec._normal = True
@@ -262,18 +262,51 @@ def create_install_directory(self, spec):
def all_specs(self):
if not os.path.isdir(self.root):
return []
- spec_files = glob.glob("%s/*/*/*/.spack/spec" % self.root)
+
+ pattern = join_path(
+ self.root, '*', '*', '*', self.metadata_dir, self.spec_file_name)
+ spec_files = glob.glob(pattern)
return [self.read_spec(s) for s in spec_files]
+ @memoized
+ def specs_by_hash(self):
+ by_hash = {}
+ for spec in self.all_specs():
+ by_hash[spec.dag_hash()] = spec
+ return by_hash
+
+
def extension_file_path(self, spec):
"""Gets full path to an installed package's extension file"""
_check_concrete(spec)
return join_path(self.metadata_path(spec), self.extension_file_name)
+ def _write_extensions(self, spec, extensions):
+ path = self.extension_file_path(spec)
+
+ # Create a temp file in the same directory as the actual file.
+ dirname, basename = os.path.split(path)
+ tmp = tempfile.NamedTemporaryFile(
+ prefix=basename, dir=dirname, delete=False)
+
+ # write tmp file
+ with tmp:
+ yaml.dump({
+ 'extensions' : [
+ { ext.name : {
+ 'hash' : ext.dag_hash(),
+ 'path' : str(ext.prefix)
+ }} for ext in sorted(extensions.values())]
+ }, tmp, default_flow_style=False)
+
+ # Atomic update by moving tmpfile on top of old one.
+ os.rename(tmp.name, path)
+
+
def _extension_map(self, spec):
- """Get a dict spec> for all extensions currnetly
+ """Get a dict spec> for all extensions currently
installed for this package."""
_check_concrete(spec)
@@ -283,16 +316,26 @@ def _extension_map(self, spec):
self._extension_maps[spec] = {}
else:
+ by_hash = self.specs_by_hash()
exts = {}
with open(path) as ext_file:
- for line in ext_file:
- try:
- spec = Spec(line.strip())
- exts[spec.name] = spec
- except spack.error.SpackError, e:
- # TODO: do something better here -- should be
- # resilient to corrupt files.
- raise InvalidExtensionSpecError(str(e))
+ yaml_file = yaml.load(ext_file)
+ for entry in yaml_file['extensions']:
+ name = next(iter(entry))
+ dag_hash = entry[name]['hash']
+ prefix = entry[name]['path']
+
+ if not dag_hash in by_hash:
+ raise InvalidExtensionSpecError(
+ "Spec %s not found in %s." % (dag_hash, prefix))
+
+ ext_spec = by_hash[dag_hash]
+ if not prefix == ext_spec.prefix:
+ raise InvalidExtensionSpecError(
+ "Prefix %s does not match spec with hash %s: %s"
+ % (prefix, dag_hash, ext_spec))
+
+ exts[ext_spec.name] = ext_spec
self._extension_maps[spec] = exts
return self._extension_maps[spec]
@@ -300,6 +343,7 @@ def _extension_map(self, spec):
def extension_map(self, spec):
"""Defensive copying version of _extension_map() for external API."""
+ _check_concrete(spec)
return self._extension_map(spec).copy()
@@ -319,23 +363,6 @@ def check_activated(self, spec, ext_spec):
raise NoSuchExtensionError(spec, ext_spec)
- def _write_extensions(self, spec, extensions):
- path = self.extension_file_path(spec)
-
- # Create a temp file in the same directory as the actual file.
- dirname, basename = os.path.split(path)
- tmp = tempfile.NamedTemporaryFile(
- prefix=basename, dir=dirname, delete=False)
-
- # Write temp file.
- with tmp:
- for extension in sorted(extensions.values()):
- tmp.write("%s\n" % extension)
-
- # Atomic update by moving tmpfile on top of old one.
- os.rename(tmp.name, path)
-
-
def add_extension(self, spec, ext_spec):
_check_concrete(spec)
_check_concrete(ext_spec)
@@ -362,7 +389,6 @@ def remove_extension(self, spec, ext_spec):
self._write_extensions(spec, exts)
-
class DirectoryLayoutError(SpackError):
"""Superclass for directory layout errors."""
def __init__(self, message):
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index e36a1d6f0b..21e36de14d 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -603,13 +603,14 @@ def to_node_dict(self):
return { self.name : d }
- def to_yaml(self):
+ def to_yaml(self, stream=None):
node_list = []
for s in self.traverse(order='pre'):
node = s.to_node_dict()
node[s.name]['hash'] = s.dag_hash()
node_list.append(node)
- return yaml.dump({ 'spec' : node_list }, default_flow_style=False)
+ return yaml.dump({ 'spec' : node_list },
+ stream=stream, default_flow_style=False)
@staticmethod
@@ -633,9 +634,12 @@ def from_node_dict(node):
@staticmethod
- def from_yaml(string):
+ def from_yaml(stream):
"""Construct a spec from YAML.
+ Parameters:
+ stream -- string or file object to read from.
+
TODO: currently discards hashes. Include hashes when they
represent more than the DAG does.
@@ -644,7 +648,7 @@ def from_yaml(string):
spec = None
try:
- yfile = yaml.load(string)
+ yfile = yaml.load(stream)
except MarkedYAMLError, e:
raise SpackYAMLError("error parsing YMAL spec:", str(e))
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index 34374628be..7ca84090f2 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -152,7 +152,8 @@ def test_handle_unknown_package(self):
# Now check that even without the package files, we know
# enough to read a spec from the spec file.
for spec, path in installed_specs.items():
- spec_from_file = self.layout.read_spec(join_path(path, '.spack', 'spec'))
+ spec_from_file = self.layout.read_spec(
+ join_path(path, '.spack', 'spec.yaml'))
# To satisfy these conditions, directory layouts need to
# read in concrete specs from their install dirs somehow.
From 5d2ee893c406ae5695659507d909df781c41e86f Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 11:45:12 -0700
Subject: [PATCH 09/25] Remove TODOs for full DAG information.
- specs on disk now include full DAG info
- no need for normalize() kludges anymore.
---
lib/spack/llnl/util/link_tree.py | 1 +
lib/spack/spack/cmd/activate.py | 8 +-------
lib/spack/spack/cmd/deactivate.py | 14 +-------------
lib/spack/spack/package.py | 5 -----
4 files changed, 3 insertions(+), 25 deletions(-)
diff --git a/lib/spack/llnl/util/link_tree.py b/lib/spack/llnl/util/link_tree.py
index 583f077b79..db13b80780 100644
--- a/lib/spack/llnl/util/link_tree.py
+++ b/lib/spack/llnl/util/link_tree.py
@@ -54,6 +54,7 @@ def find_conflict(self, dest_root, **kwargs):
"""Returns the first file in dest that conflicts with src"""
kwargs['follow_nonexisting'] = False
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
+ print src, dest
if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest):
return dest
diff --git a/lib/spack/spack/cmd/activate.py b/lib/spack/spack/cmd/activate.py
index 4070baaa70..1004f1f8e6 100644
--- a/lib/spack/spack/cmd/activate.py
+++ b/lib/spack/spack/cmd/activate.py
@@ -38,17 +38,11 @@ def setup_parser(subparser):
def activate(parser, args):
- # TODO: shouldn't have to concretize here. Fix DAG issues.
- specs = spack.cmd.parse_specs(args.spec, concretize=True)
+ specs = spack.cmd.parse_specs(args.spec)
if len(specs) != 1:
tty.die("activate requires one spec. %d given." % len(specs))
- # TODO: remove this hack when DAG info is stored in dir layout.
- # This ensures the ext spec is always normalized properly.
- spack.db.get(specs[0])
-
spec = spack.cmd.disambiguate_spec(specs[0])
-
if not spec.package.is_extension:
tty.die("%s is not an extension." % spec.name)
diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py
index c9a4d4b2f6..e44be41029 100644
--- a/lib/spack/spack/cmd/deactivate.py
+++ b/lib/spack/spack/cmd/deactivate.py
@@ -44,15 +44,10 @@ def setup_parser(subparser):
def deactivate(parser, args):
- # TODO: shouldn't have to concretize here. Fix DAG issues.
- specs = spack.cmd.parse_specs(args.spec, concretize=True)
+ specs = spack.cmd.parse_specs(args.spec)
if len(specs) != 1:
tty.die("deactivate requires one spec. %d given." % len(specs))
- # TODO: remove this hack when DAG info is stored properly.
- # This ensures the ext spec is always normalized properly.
- spack.db.get(specs[0])
-
spec = spack.cmd.disambiguate_spec(specs[0])
pkg = spec.package
@@ -67,9 +62,6 @@ def deactivate(parser, args):
ext_pkg.do_deactivate(force=True)
elif pkg.is_extension:
- # TODO: store DAG info properly (see above)
- spec.normalize()
-
if not args.force and not spec.package.activated:
tty.die("%s is not activated." % pkg.spec.short_spec)
@@ -81,10 +73,6 @@ def deactivate(parser, args):
for name in topo_order:
espec = index[name]
epkg = espec.package
-
- # TODO: store DAG info properly (see above)
- epkg.spec.normalize()
-
if epkg.extends(pkg.extendee_spec):
if epkg.activated or args.force:
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index bbb64cd05b..b0bb1fb7bc 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -984,16 +984,11 @@ def do_activate(self, **kwargs):
self._sanity_check_extension()
force = kwargs.get('force', False)
- # TODO: get rid of this normalize - DAG handling.
- self.spec.normalize()
-
spack.install_layout.check_extension_conflict(self.extendee_spec, self.spec)
if not force:
for spec in self.spec.traverse(root=False):
if spec.package.extends(self.extendee_spec):
- # TODO: fix this normalize() requirement -- revisit DAG handling.
- spec.package.spec.normalize()
if not spec.package.activated:
spec.package.do_activate(**kwargs)
From 0944ba120cad3de7b84b15e19b9c889f5bea6241 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 17 Mar 2015 20:59:47 -0400
Subject: [PATCH 10/25] relations are now "directives", and code is cleaned up.
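Package authors see no change: directives are still plain calls at class
scope. The @directive decorator just injects the enclosing class namespace
(wrapped by DictWrapper) as the first argument so each directive can mutate
it. Illustrative package file (hypothetical URLs and checksum):

    from spack import *

    class Mpileaks(Package):
        """Hypothetical example package."""
        homepage = "http://example.com/mpileaks"
        url      = "http://example.com/mpileaks-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')
        depends_on('mpi')
        depends_on('callpath')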
---
lib/spack/llnl/util/lang.py | 25 +++-
lib/spack/spack/__init__.py | 6 +-
.../spack/{relations.py => directives.py} | 128 ++++++++----------
lib/spack/spack/multimethod.py | 2 +-
lib/spack/spack/package.py | 3 +-
5 files changed, 82 insertions(+), 82 deletions(-)
rename lib/spack/spack/{relations.py => directives.py} (62%)
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 332367f537..13453c20ed 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -126,9 +126,9 @@ def caller_locals():
del stack
-def get_calling_package_name():
+def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
- module's name.
+ enclosing module's name.
"""
stack = inspect.stack()
try:
@@ -322,6 +322,27 @@ def match(string):
return match
+
+def DictWrapper(dictionary):
+ """Returns a class that wraps a dictionary and enables it to be used
+ like an object."""
+ class wrapper(object):
+ def __getattr__(self, name):
+ return dictionary[name]
+
+ def __setattr__(self, name, value):
+ dictionary[name] = value
+ return value
+
+ def setdefault(self, *args):
+ return dictionary.setdefault(*args)
+
+ def get(self, *args):
+ return dictionary.get(*args)
+
+ return wrapper()
+
+
class RequiredAttributeError(ValueError):
def __init__(self, message):
super(RequiredAttributeError, self).__init__(message)
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index eb891e3d57..053c4036d8 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -146,9 +146,9 @@
from llnl.util.filesystem import *
__all__ += llnl.util.filesystem.__all__
-import spack.relations
-from spack.relations import *
-__all__ += spack.relations.__all__
+import spack.directives
+from spack.directives import *
+__all__ += spack.directives.__all__
import spack.util.executable
from spack.util.executable import *
diff --git a/lib/spack/spack/relations.py b/lib/spack/spack/directives.py
similarity index 62%
rename from lib/spack/spack/relations.py
rename to lib/spack/spack/directives.py
index a0c7723473..e1589c019f 100644
--- a/lib/spack/spack/relations.py
+++ b/lib/spack/spack/directives.py
@@ -22,51 +22,26 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
-"""
-This package contains relationships that can be defined among packages.
-Relations are functions that can be called inside a package definition,
-for example:
+"""This package contains directives that can be used within a package.
- class OpenMPI(Package):
+Directives are functions that can be called inside a package
+definition to modify the package, for example:
+
+ class OpenMpi(Package):
depends_on("hwloc")
provides("mpi")
...
-The available relations are:
+``provides`` and ``depends_on`` are spack directives.
-depends_on
- Above, the OpenMPI package declares that it "depends on" hwloc. This means
- that the hwloc package needs to be installed before OpenMPI can be
- installed. When a user runs 'spack install openmpi', spack will fetch
- hwloc and install it first.
+The available directives are:
-provides
- This is useful when more than one package can satisfy a dependence. Above,
- OpenMPI declares that it "provides" mpi. Other implementations of the MPI
- interface, like mvapich and mpich, also provide mpi, e.g.:
+ * ``version``
+ * ``depends_on``
+ * ``provides``
+ * ``extends``
+ * ``patch``
- class Mvapich(Package):
- provides("mpi")
- ...
-
- class Mpich(Package):
- provides("mpi")
- ...
-
- Instead of depending on openmpi, mvapich, or mpich, another package can
- declare that it depends on "mpi":
-
- class Mpileaks(Package):
- depends_on("mpi")
- ...
-
- Now the user can pick which MPI they would like to build with when they
- install mpileaks. For example, the user could install 3 instances of
- mpileaks, one for each MPI version, by issuing these three commands:
-
- spack install mpileaks ^openmpi
- spack install mpileaks ^mvapich
- spack install mpileaks ^mpich
"""
__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version' ]
@@ -84,14 +59,27 @@ class Mpileaks(Package):
from spack.spec import Spec, parse_anonymous_spec
+def directive(fun):
+ """Decorator that allows a function to be called while a class is
+ being constructed, and to modify the class.
-def version(ver, checksum=None, **kwargs):
+ Adds the class scope as an initial parameter when called, like
+ a class method would.
+ """
+ def directive_function(*args, **kwargs):
+ pkg = DictWrapper(caller_locals())
+ pkg.name = get_calling_module_name()
+ return fun(pkg, *args, **kwargs)
+ return directive_function
+
+
+@directive
+def version(pkg, ver, checksum=None, **kwargs):
"""Adds a version and metadata describing how to fetch it.
Metadata is just stored as a dict in the package's versions
dictionary. Package must turn it into a valid fetch strategy
later.
"""
- pkg = caller_locals()
versions = pkg.setdefault('versions', {})
# special case checksum for backward compatibility
@@ -103,21 +91,21 @@ def version(ver, checksum=None, **kwargs):
versions[Version(ver)] = kwargs
-def depends_on(*specs):
+@directive
+def depends_on(pkg, *specs):
"""Adds a dependencies local variable in the locals of
the calling class, based on args. """
- pkg = get_calling_package_name()
- clocals = caller_locals()
- dependencies = clocals.setdefault('dependencies', {})
+ dependencies = pkg.setdefault('dependencies', {})
for string in specs:
for spec in spack.spec.parse(string):
- if pkg == spec.name:
- raise CircularReferenceError('depends_on', pkg)
+ if pkg.name == spec.name:
+ raise CircularReferenceError('depends_on', pkg.name)
dependencies[spec.name] = spec
-def extends(spec, **kwargs):
+@directive
+def extends(pkg, spec, **kwargs):
"""Same as depends_on, but dependency is symlinked into parent prefix.
This is for Python and other language modules where the module
@@ -131,64 +119,54 @@ def extends(spec, **kwargs):
mechanism.
"""
- pkg = get_calling_package_name()
- clocals = caller_locals()
- dependencies = clocals.setdefault('dependencies', {})
- extendees = clocals.setdefault('extendees', {})
+ dependencies = pkg.setdefault('dependencies', {})
+ extendees = pkg.setdefault('extendees', {})
if extendees:
raise RelationError("Packages can extend at most one other package.")
spec = Spec(spec)
- if pkg == spec.name:
- raise CircularReferenceError('extends', pkg)
+ if pkg.name == spec.name:
+ raise CircularReferenceError('extends', pkg.name)
dependencies[spec.name] = spec
extendees[spec.name] = (spec, kwargs)
-def provides(*specs, **kwargs):
+@directive
+def provides(pkg, *specs, **kwargs):
"""Allows packages to provide a virtual dependency. If a package provides
'mpi', other packages can declare that they depend on "mpi", and spack
can use the providing package to satisfy the dependency.
"""
- pkg = get_calling_package_name()
- spec_string = kwargs.get('when', pkg)
- provider_spec = parse_anonymous_spec(spec_string, pkg)
+ spec_string = kwargs.get('when', pkg.name)
+ provider_spec = parse_anonymous_spec(spec_string, pkg.name)
- provided = caller_locals().setdefault("provided", {})
+ provided = pkg.setdefault("provided", {})
for string in specs:
for provided_spec in spack.spec.parse(string):
- if pkg == provided_spec.name:
- raise CircularReferenceError('depends_on', pkg)
+ if pkg.name == provided_spec.name:
+ raise CircularReferenceError('depends_on', pkg.name)
provided[provided_spec] = provider_spec
-def patch(url_or_filename, **kwargs):
+@directive
+def patch(pkg, url_or_filename, **kwargs):
"""Packages can declare patches to apply to source. You can
optionally provide a when spec to indicate that a particular
patch should only be applied when the package's spec meets
certain conditions (e.g. a particular version).
"""
- pkg = get_calling_package_name()
level = kwargs.get('level', 1)
- when_spec = parse_anonymous_spec(kwargs.get('when', pkg), pkg)
+ when = kwargs.get('when', pkg.name)
- patches = caller_locals().setdefault('patches', {})
+ patches = pkg.setdefault('patches', {})
+
+ when_spec = parse_anonymous_spec(when, pkg.name)
if when_spec not in patches:
- patches[when_spec] = [Patch(pkg, url_or_filename, level)]
+ patches[when_spec] = [Patch(pkg.name, url_or_filename, level)]
else:
# if this spec is identical to some other, then append this
# patch to the existing list.
- patches[when_spec].append(Patch(pkg, url_or_filename, level))
-
-
-def conflicts(*specs):
- """Packages can declare conflicts with other packages.
- This can be as specific as you like: use regular spec syntax.
-
- NOT YET IMPLEMENTED.
- """
- # TODO: implement conflicts
- pass
+ patches[when_spec].append(Patch(pkg.name, url_or_filename, level))
class RelationError(spack.error.SpackError):
diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py
index 974401e1aa..892619c6ac 100644
--- a/lib/spack/spack/multimethod.py
+++ b/lib/spack/spack/multimethod.py
@@ -195,7 +195,7 @@ def install(self, prefix):
"""
class when(object):
def __init__(self, spec):
- pkg = get_calling_package_name()
+ pkg = get_calling_module_name()
self.spec = parse_anonymous_spec(spec, pkg)
def __call__(self, method):
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 7d9eca5077..7f2b53ceed 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -303,7 +303,8 @@ class SomePackage(Package):
"""
#
- # These variables are defaults for the various "relations".
+ # These variables are defaults for Spack's various package
+ # directives.
#
"""Map of information about Versions of this package.
Map goes: Version -> dict of attributes"""
From 1f8ce403dcc84a741bdef8dc08db1b8182690386 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 17 Mar 2015 23:23:56 -0400
Subject: [PATCH 11/25] Modularize directives. Now each directive specifies
its storage.
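
Each directive now declares which dict(s) on the package class it stores
its results in, and ensure_dicts() guarantees that every package ends up
with those dicts even if no directive ever touched them. A minimal,
self-contained sketch of the storage idea (simplified: the real decorator
also injects the calling class scope via caller_locals(), which is omitted
here; `registry` and `FakePackage` are illustrative names only):

    registry = {}   # directive name -> tuple of storage dict names

    def directive(dicts=None):
        """Record which storage dicts a directive needs."""
        if isinstance(dicts, str):
            dicts = (dicts,)
        def decorator(fun):
            registry[fun.__name__] = tuple(dicts)
            return fun
        return decorator

    def ensure_dicts(pkg_class):
        """Give a package class an empty dict for every registered name."""
        for names in registry.values():
            for name in names:
                if not hasattr(pkg_class, name):
                    setattr(pkg_class, name, {})

    @directive(dicts='versions')
    def version(pkg_class, ver, **kwargs):
        pkg_class.versions[ver] = kwargs

    class FakePackage(object):
        pass

    ensure_dicts(FakePackage)               # FakePackage.versions == {}
    version(FakePackage, '1.0', md5='abc')  # metadata lands in the dict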
---
lib/spack/llnl/util/lang.py | 25 ++---
lib/spack/spack/directives.py | 182 ++++++++++++++++++++++++----------
lib/spack/spack/package.py | 44 +-------
3 files changed, 143 insertions(+), 108 deletions(-)
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 13453c20ed..9e1bef18ca 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -132,16 +132,14 @@ def get_calling_module_name():
"""
stack = inspect.stack()
try:
- # get calling function name (the relation)
- relation = stack[1][3]
-
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if not '__module__' in caller_locals:
- raise ScopeError(relation)
+ raise RuntimeError("Must invoke get_calling_module_name() "
+ "from inside a class definition!")
module_name = caller_locals['__module__']
base_name = module_name.split('.')[-1]
@@ -327,18 +325,15 @@ def DictWrapper(dictionary):
"""Returns a class that wraps a dictionary and enables it to be used
like an object."""
class wrapper(object):
- def __getattr__(self, name):
- return dictionary[name]
+ def __getattr__(self, name): return dictionary[name]
+ def __setattr__(self, name, value): dictionary[name] = value
+ def setdefault(self, *args): return dictionary.setdefault(*args)
+ def get(self, *args): return dictionary.get(*args)
+ def keys(self): return dictionary.keys()
+ def values(self): return dictionary.values()
+ def items(self): return dictionary.items()
+ def __iter__(self): return iter(dictionary)
- def __setattr__(self, name, value):
- dictionary[name] = value
- return value
-
- def setdefault(self, *args):
- return dictionary.setdefault(*args)
-
- def get(self, *args):
- return dictionary.get(*args)
return wrapper()
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index e1589c019f..a45edecad1 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -41,9 +41,11 @@ class OpenMpi(Package):
* ``provides``
* ``extends``
* ``patch``
+ * ``variant``
"""
-__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version' ]
+__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version',
+ 'variant' ]
import re
import inspect
@@ -59,52 +61,125 @@ class OpenMpi(Package):
from spack.spec import Spec, parse_anonymous_spec
-def directive(fun):
- """Decorator that allows a function to be called while a class is
- being constructed, and to modify the class.
+#
+# This is a list of all directives, built up as they are defined in
+# this file.
+#
+directives = {}
+
+
+def ensure_dicts(pkg):
+ """Ensure that a package has all the dicts required by directives."""
+ for name, d in directives.items():
+ d.ensure_dicts(pkg)
+
+
+class directive(object):
+ """Decorator for Spack directives.
+
+ Spack directives allow you to modify a package while it is being
+    defined, e.g. to add version or dependency information. Directives
+    are one of the key pieces of Spack's package "language", which is
+    embedded in Python.
+
+ Here's an example directive:
+
+ @directive(dicts='versions')
+ version(pkg, ...):
+ ...
+
+    This directive allows you to write:
+
+ class Foo(Package):
+ version(...)
+
+ The ``@directive`` decorator handles a couple things for you:
+
+ 1. Adds the class scope (pkg) as an initial parameter when
+ called, like a class method would. This allows you to modify
+ a package from within a directive, while the package is still
+ being defined.
+
+ 2. It automatically adds a dictionary called "versions" to the
+ package so that you can refer to pkg.versions.
+
+ The ``(dicts='versions')`` part ensures that ALL packages in Spack
+ will have a ``versions`` attribute after they're constructed, and
+ that if no directive actually modified it, it will just be an
+ empty dict.
+
+ This is just a modular way to add storage attributes to the
+ Package class, and it's how Spack gets information from the
+ packages to the core.
- Adds the class scope as an initial parameter when called, like
- a class method would.
"""
- def directive_function(*args, **kwargs):
- pkg = DictWrapper(caller_locals())
- pkg.name = get_calling_module_name()
- return fun(pkg, *args, **kwargs)
- return directive_function
+
+ def __init__(self, **kwargs):
+ # dict argument allows directives to have storage on the package.
+ dicts = kwargs.get('dicts', None)
+
+ if isinstance(dicts, basestring):
+ dicts = (dicts,)
+ elif type(dicts) not in (list, tuple):
+ raise TypeError(
+ "dicts arg must be list, tuple, or string. Found %s."
+ % type(dicts))
+
+ self.dicts = dicts
-@directive
+ def ensure_dicts(self, pkg):
+ """Ensure that a package has the dicts required by this directive."""
+ for d in self.dicts:
+ if not hasattr(pkg, d):
+ setattr(pkg, d, {})
+
+ attr = getattr(pkg, d)
+ if not isinstance(attr, dict):
+ raise spack.error.SpackError(
+ "Package %s has non-dict %s attribute!" % (pkg, d))
+
+
+ def __call__(self, directive_function):
+ directives[directive_function.__name__] = self
+
+ def wrapped(*args, **kwargs):
+ pkg = DictWrapper(caller_locals())
+ self.ensure_dicts(pkg)
+
+ pkg.name = get_calling_module_name()
+ return directive_function(pkg, *args, **kwargs)
+
+ return wrapped
+
+
+@directive(dicts='versions')
def version(pkg, ver, checksum=None, **kwargs):
"""Adds a version and metadata describing how to fetch it.
Metadata is just stored as a dict in the package's versions
dictionary. Package must turn it into a valid fetch strategy
later.
"""
- versions = pkg.setdefault('versions', {})
-
# special case checksum for backward compatibility
if checksum:
kwargs['md5'] = checksum
- # Store the kwargs for the package to use later when constructing
- # a fetch strategy.
- versions[Version(ver)] = kwargs
+    # Store kwargs for the package to use later with a fetch_strategy.
+ pkg.versions[Version(ver)] = kwargs
-@directive
+@directive(dicts='dependencies')
def depends_on(pkg, *specs):
"""Adds a dependencies local variable in the locals of
the calling class, based on args. """
- dependencies = pkg.setdefault('dependencies', {})
-
for string in specs:
for spec in spack.spec.parse(string):
if pkg.name == spec.name:
raise CircularReferenceError('depends_on', pkg.name)
- dependencies[spec.name] = spec
+ pkg.dependencies[spec.name] = spec
-@directive
+@directive(dicts=('extendees', 'dependencies'))
def extends(pkg, spec, **kwargs):
"""Same as depends_on, but dependency is symlinked into parent prefix.
@@ -119,19 +194,17 @@ def extends(pkg, spec, **kwargs):
mechanism.
"""
- dependencies = pkg.setdefault('dependencies', {})
- extendees = pkg.setdefault('extendees', {})
- if extendees:
- raise RelationError("Packages can extend at most one other package.")
+ if pkg.extendees:
+ raise DirectiveError("Packages can extend at most one other package.")
spec = Spec(spec)
if pkg.name == spec.name:
raise CircularReferenceError('extends', pkg.name)
- dependencies[spec.name] = spec
- extendees[spec.name] = (spec, kwargs)
+ pkg.dependencies[spec.name] = spec
+ pkg.extendees[spec.name] = (spec, kwargs)
-@directive
+@directive(dicts='provided')
def provides(pkg, *specs, **kwargs):
"""Allows packages to provide a virtual dependency. If a package provides
'mpi', other packages can declare that they depend on "mpi", and spack
@@ -140,15 +213,14 @@ def provides(pkg, *specs, **kwargs):
spec_string = kwargs.get('when', pkg.name)
provider_spec = parse_anonymous_spec(spec_string, pkg.name)
- provided = pkg.setdefault("provided", {})
for string in specs:
for provided_spec in spack.spec.parse(string):
if pkg.name == provided_spec.name:
raise CircularReferenceError('depends_on', pkg.name)
- provided[provided_spec] = provider_spec
+ pkg.provided[provided_spec] = provider_spec
-@directive
+@directive(dicts='patches')
def patch(pkg, url_or_filename, **kwargs):
"""Packages can declare patches to apply to source. You can
optionally provide a when spec to indicate that a particular
@@ -158,36 +230,42 @@ def patch(pkg, url_or_filename, **kwargs):
level = kwargs.get('level', 1)
when = kwargs.get('when', pkg.name)
- patches = pkg.setdefault('patches', {})
-
when_spec = parse_anonymous_spec(when, pkg.name)
- if when_spec not in patches:
- patches[when_spec] = [Patch(pkg.name, url_or_filename, level)]
+ if when_spec not in pkg.patches:
+ pkg.patches[when_spec] = [Patch(pkg.name, url_or_filename, level)]
else:
# if this spec is identical to some other, then append this
# patch to the existing list.
- patches[when_spec].append(Patch(pkg.name, url_or_filename, level))
+ pkg.patches[when_spec].append(Patch(pkg.name, url_or_filename, level))
-class RelationError(spack.error.SpackError):
- """This is raised when something is wrong with a package relation."""
- def __init__(self, relation, message):
- super(RelationError, self).__init__(message)
- self.relation = relation
+@directive(dicts='variants')
+def variant(pkg, name, description="", **kwargs):
+ """Define a variant for the package. Allows the user to supply
+ +variant/-variant in a spec. You can optionally supply an
+    initial + or - to make the variant enabled or disabled by default.
+ """
+ return
+
+ if not re.match(r'[-~+]?[A-Za-z0-9_][A-Za-z0-9_.-]*', name):
+ raise DirectiveError("Invalid variant name in %s: '%s'"
+ % (pkg.name, name))
+
+ enabled = re.match(r'+', name)
+ pkg.variants[name] = enabled
-class ScopeError(RelationError):
- """This is raised when a relation is called from outside a spack package."""
- def __init__(self, relation):
- super(ScopeError, self).__init__(
- relation,
- "Must invoke '%s' from inside a class definition!" % relation)
+class DirectiveError(spack.error.SpackError):
+ """This is raised when something is wrong with a package directive."""
+ def __init__(self, directive, message):
+ super(DirectiveError, self).__init__(message)
+ self.directive = directive
-class CircularReferenceError(RelationError):
+class CircularReferenceError(DirectiveError):
"""This is raised when something depends on itself."""
- def __init__(self, relation, package):
+ def __init__(self, directive, package):
super(CircularReferenceError, self).__init__(
- relation,
- "Package '%s' cannot pass itself to %s." % (package, relation))
+ directive,
+ "Package '%s' cannot pass itself to %s." % (package, directive))
self.package = package
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 7f2b53ceed..2891791339 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -55,6 +55,7 @@
import spack.compilers
import spack.mirror
import spack.hooks
+import spack.directives
import spack.build_environment as build_env
import spack.url as url
import spack.fetch_strategy as fs
@@ -301,33 +302,6 @@ class SomePackage(Package):
clean() (some of them do this), and others to provide custom behavior.
"""
-
- #
- # These variables are defaults for Spack's various package
- # directives.
- #
- """Map of information about Versions of this package.
- Map goes: Version -> dict of attributes"""
- versions = {}
-
- """Specs of dependency packages, keyed by name."""
- dependencies = {}
-
- """Specs of virtual packages provided by this package, keyed by name."""
- provided = {}
-
- """Specs of conflicting packages, keyed by name. """
- conflicted = {}
-
- """Patches to apply to newly expanded source, if any."""
- patches = {}
-
- """Specs of package this one extends, or None.
-
- Currently, ppackages can extend at most one other package.
- """
- extendees = {}
-
#
# These are default values for instance variables.
#
@@ -351,20 +325,8 @@ def __init__(self, spec):
if '.' in self.name:
self.name = self.name[self.name.rindex('.') + 1:]
- # Sanity check some required variables that could be
- # overridden by package authors.
- def ensure_has_dict(attr_name):
- if not hasattr(self, attr_name):
- raise PackageError("Package %s must define %s" % attr_name)
-
- attr = getattr(self, attr_name)
- if not isinstance(attr, dict):
- raise PackageError("Package %s has non-dict %s attribute!"
- % (self.name, attr_name))
- ensure_has_dict('versions')
- ensure_has_dict('dependencies')
- ensure_has_dict('conflicted')
- ensure_has_dict('patches')
+ # Sanity check attributes required by Spack directives.
+ spack.directives.ensure_dicts(type(self))
# Check versions in the versions dict.
for v in self.versions:
From c105a8d42a0c052ad5fb7bdd9a25ee9b25008ff7 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 24 Mar 2015 08:41:42 -0700
Subject: [PATCH 12/25] Small updates to directives.
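
The early `return` stub is gone, so variant() actually records variants
now, and the name check is anchored: re.match() only anchors at the start
of the string, so without a trailing `$` a name with invalid trailing
characters would slip through because a valid prefix matched. A small
illustration (the names are made up):

    import re

    pattern = r'[-~+]?[A-Za-z0-9_][A-Za-z0-9_.-]*'
    re.match(pattern, 'debug!')         # matches the 'debug' prefix -> bad name accepted
    re.match(pattern + '$', 'debug!')   # None -> DirectiveError would be raised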
---
lib/spack/spack/directives.py | 6 ++----
lib/spack/spack/test/spec_semantics.py | 23 ++++++++++++++++-------
2 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index a45edecad1..2ae56fce33 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -240,14 +240,12 @@ def patch(pkg, url_or_filename, **kwargs):
@directive(dicts='variants')
-def variant(pkg, name, description="", **kwargs):
+def variant(pkg, name, description=""):
"""Define a variant for the package. Allows the user to supply
+variant/-variant in a spec. You can optionally supply an
     initial + or - to make the variant enabled or disabled by default.
"""
- return
-
- if not re.match(r'[-~+]?[A-Za-z0-9_][A-Za-z0-9_.-]*', name):
+ if not re.match(r'^[-~+]?[A-Za-z0-9_][A-Za-z0-9_.-]*$', name):
raise DirectiveError("Invalid variant name in %s: '%s'"
% (pkg.name, name))
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 5fb09e68af..1db7956f04 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -71,7 +71,7 @@ def check_invalid_constraint(self, spec, constraint):
# ================================================================================
- # Satisfiability and constraints
+ # Satisfiability
# ================================================================================
def test_satisfies(self):
self.check_satisfies('libelf@0.8.13', '@0:1')
@@ -96,6 +96,9 @@ def test_satisfies_compiler_version(self):
self.check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')
self.check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
+ self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
+ self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+
def test_satisfies_architecture(self):
self.check_satisfies('foo=chaos_5_x86_64_ib', '=chaos_5_x86_64_ib')
@@ -147,7 +150,16 @@ def test_satisfies_virtual_dependency_versions(self):
self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
- def test_constrain(self):
+ def test_satisfies_variant(self):
+ self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
+ self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+
+
+
+ # ================================================================================
+ # Constraints
+ # ================================================================================
+ def test_constrain_variants(self):
self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')
self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6',
'libelf@0:2.5%gcc@2:4.6', 'libelf@2.1:3%gcc@4.5:4.7')
@@ -158,6 +170,8 @@ def test_constrain(self):
self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo')
+
+ def test_constrain_arch(self):
self.check_constrain('libelf=bgqos_0', 'libelf=bgqos_0', 'libelf=bgqos_0')
self.check_constrain('libelf=bgqos_0', 'libelf', 'libelf=bgqos_0')
@@ -170,8 +184,3 @@ def test_invalid_constraint(self):
self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
self.check_invalid_constraint('libelf=bgqos_0', 'libelf=x86_54')
-
-
- def test_compiler_satisfies(self):
- self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
- self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
From 3b1898b8e479fc1e7d9b71a57f625f36485b1ac0 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 26 Apr 2015 13:12:02 -0700
Subject: [PATCH 13/25] Fix SPACK-40: Finish adding variant directive.
- Variants are now declarable in packages using the variant() directive.
- Variants are checked - you can't just ask for a random variant, it has to be declared.
- conditional logic (@when, if, '+debug' in spec, etc.) is still required in the
  package to implement the variant (see the sketch below).
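
A hypothetical package using the directive might look like this (the
package name, configure flags, and install() signature here are
illustrative only, not taken from a real package):

    class Foo(Package):
        variant('debug', default=False,
                description="Build with debugging symbols.")

        def install(self, spec, prefix):
            args = ['--prefix=%s' % prefix]
            # Declaring the variant does not change the build by itself;
            # the package still has to act on it explicitly.
            if '+debug' in spec:
                args.append('--enable-debug')
            configure(*args)
            make()
            make('install')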
---
lib/spack/spack/cmd/info.py | 29 ++++++++++++++++++++----
lib/spack/spack/directives.py | 21 +++++++++--------
lib/spack/spack/spec.py | 17 ++++++++++++++
var/spack/mock_packages/mpich/package.py | 3 +++
4 files changed, 56 insertions(+), 14 deletions(-)
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index eafafc601a..c6209523f0 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -22,12 +22,22 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import textwrap
from llnl.util.tty.colify import *
import spack
import spack.fetch_strategy as fs
description = "Get detailed information on a particular package"
+def padder(str_list, extra=0):
+ """Return a function to pad elements of a list."""
+ length = max(len(str(s)) for s in str_list) + extra
+ def pad(string):
+ string = str(string)
+ padding = max(0, length - len(string))
+ return string + (padding * ' ')
+ return pad
+
def setup_parser(subparser):
subparser.add_argument('name', metavar="PACKAGE", help="Name of package to get info for.")
@@ -42,13 +52,24 @@ def print_text_info(pkg):
print "Safe versions: "
if not pkg.versions:
- print("None.")
+ print("None")
else:
- maxlen = max(len(str(v)) for v in pkg.versions)
- fmt = "%%-%ss" % maxlen
+ pad = padder(pkg.versions, 4)
for v in reversed(sorted(pkg.versions)):
f = fs.for_package_version(pkg, v)
- print " " + (fmt % v) + " " + str(f)
+ print " %s%s" % (pad(v), str(f))
+
+ print
+ print "Variants:"
+ if not pkg.variants:
+ print "None"
+ else:
+ pad = padder(pkg.variants, 4)
+ for name in sorted(pkg.variants):
+ v = pkg.variants[name]
+ print " %s%s" % (
+ pad(('+' if v.default else '-') + name + ':'),
+ "\n".join(textwrap.wrap(v.description)))
print
print "Dependencies:"
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index 2ae56fce33..5c17fe4044 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -58,6 +58,7 @@ class OpenMpi(Package):
import spack.url
from spack.version import Version
from spack.patch import Patch
+from spack.variant import Variant
from spack.spec import Spec, parse_anonymous_spec
@@ -240,17 +241,17 @@ def patch(pkg, url_or_filename, **kwargs):
@directive(dicts='variants')
-def variant(pkg, name, description=""):
- """Define a variant for the package. Allows the user to supply
- +variant/-variant in a spec. You can optionally supply an
-    initial + or - to make the variant enabled or disabled by default.
- """
- if not re.match(r'^[-~+]?[A-Za-z0-9_][A-Za-z0-9_.-]*$', name):
- raise DirectiveError("Invalid variant name in %s: '%s'"
- % (pkg.name, name))
+def variant(pkg, name, **kwargs):
+ """Define a variant for the package. Packager can specify a default
+ value (on or off) as well as a text description."""
- enabled = re.match(r'+', name)
- pkg.variants[name] = enabled
+ default = bool(kwargs.get('default', False))
+ description = str(kwargs.get('description', "")).strip()
+
+ if not re.match(spack.spec.identifier_re, name):
+ raise DirectiveError("Invalid variant name in %s: '%s'" % (pkg.name, name))
+
+ pkg.variants[name] = Variant(default, description)
class DirectiveError(spack.error.SpackError):
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index dffdccaddb..fca14f97db 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -110,6 +110,9 @@
from spack.util.prefix import Prefix
from spack.virtual import ProviderIndex
+# Valid pattern for an identifier in Spack
+identifier_re = r'\w[\w-]*'
+
# Convenient names for color formats so that other things can use them
compiler_color = '@g'
version_color = '@c'
@@ -893,6 +896,11 @@ def validate_names(self):
if not compilers.supported(spec.compiler):
raise UnsupportedCompilerError(spec.compiler.name)
+ # Ensure that variants all exist.
+ for vname, variant in spec.variants.items():
+ if vname not in spec.package.variants:
+ raise UnknownVariantError(spec.name, vname)
+
def constrain(self, other, **kwargs):
other = self._autospec(other)
@@ -1354,6 +1362,8 @@ def __init__(self):
(r'\~', lambda scanner, val: self.token(OFF, val)),
(r'\%', lambda scanner, val: self.token(PCT, val)),
(r'\=', lambda scanner, val: self.token(EQ, val)),
+ # This is more liberal than identifier_re (see above).
+ # Checked by check_identifier() for better error messages.
(r'\w[\w.-]*', lambda scanner, val: self.token(ID, val)),
(r'\s+', lambda scanner, val: None)])
@@ -1580,6 +1590,13 @@ def __init__(self, compiler_name):
"The '%s' compiler is not yet supported." % compiler_name)
+class UnknownVariantError(SpecError):
+ """Raised when the same variant occurs in a spec twice."""
+ def __init__(self, pkg, variant):
+ super(UnknownVariantError, self).__init__(
+ "Package %s has no variant %s!" % (pkg, variant))
+
+
class DuplicateArchitectureError(SpecError):
"""Raised when the same architecture occurs in a spec twice."""
def __init__(self, message):
diff --git a/var/spack/mock_packages/mpich/package.py b/var/spack/mock_packages/mpich/package.py
index 75a939a892..f77d3efc5d 100644
--- a/var/spack/mock_packages/mpich/package.py
+++ b/var/spack/mock_packages/mpich/package.py
@@ -30,6 +30,9 @@ class Mpich(Package):
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 2
+ variant('debug', default=False,
+ description="Compile MPICH with debug flags.")
+
version('3.0.4', '9c5d5d4fe1e17dd12153f40bc5b6dbc0')
version('3.0.3', 'foobarbaz')
version('3.0.2', 'foobarbaz')
From 535c1fac87cc2323d2ac6ed6db35bfac78ad6a35 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Mon, 27 Apr 2015 00:10:40 -0700
Subject: [PATCH 14/25] SPACK-56: fix Variant concretization.
- Variant concretization is tricky:
- During concretization, a spec without variants (e.g., mpich) means
"don't care". So, Spec('mpich').satisfies('mpich+debug') is true
because it *could* still be built that way.
- After concretization, a spec without a particular variant means
"don't know", as that wasn't part of the spec, so the opposite
relationship is true. Assume 'spec' is already installed:
spec.satisfies('mpich+debug')
    this is false because the `debug` variant didn't exist when spec
    was built, so we can't satisfy the explicit request for +debug
    (see the sketch below).
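
Roughly (a sketch of the intended semantics only; the `concrete=True`
keyword mirrors the test helper added below and is not how real specs
become concrete):

    abstract = Spec('mpich')                  # nothing decided yet
    abstract.satisfies('mpich+debug')         # True: could still be built +debug

    installed = Spec('mpich', concrete=True)  # built with no 'debug' variant recorded
    installed.satisfies('mpich+debug')        # False: explicit +debug can't be met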
---
lib/spack/spack/concretize.py | 10 +++++++
lib/spack/spack/spec.py | 40 +++++++++++++++++++-------
lib/spack/spack/test/spec_dag.py | 6 ----
lib/spack/spack/test/spec_semantics.py | 38 +++++++++++++++++++-----
lib/spack/spack/variant.py | 36 +++++++++++++++++++++++
5 files changed, 107 insertions(+), 23 deletions(-)
create mode 100644 lib/spack/spack/variant.py
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index 3f569f9dce..15e886ad3c 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -101,6 +101,16 @@ def concretize_architecture(self, spec):
spec.architecture = spack.architecture.sys_type()
+ def concretize_variants(self, spec):
+ """If the spec already has variants filled in, return. Otherwise, add
+ the default variants from the package specification.
+ """
+ for name, variant in spec.package.variants.items():
+ if name not in spec.variants:
+ spec.variants[name] = spack.spec.VariantSpec(
+ name, variant.default)
+
+
def concretize_compiler(self, spec):
"""If the spec already has a compiler, we're done. If not, then take
the compiler used for the nearest ancestor with a compiler
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index fca14f97db..4639aea452 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -270,7 +270,7 @@ def __repr__(self):
@key_ordering
-class Variant(object):
+class VariantSpec(object):
"""Variants are named, build-time options for a package. Names depend
on the particular package being built, and each named variant can
be enabled or disabled.
@@ -285,7 +285,7 @@ def _cmp_key(self):
def copy(self):
- return Variant(self.name, self.enabled)
+ return VariantSpec(self.name, self.enabled)
def __str__(self):
@@ -294,9 +294,27 @@ def __str__(self):
class VariantMap(HashableMap):
- def satisfies(self, other):
- return all(self[key].enabled == other[key].enabled
- for key in other if key in self)
+ def satisfies(self, other, self_is_concrete):
+ if self_is_concrete:
+ return all(k in self and self[k].enabled == other[k].enabled
+ for k in other)
+ else:
+ return all(self[k].enabled == other[k].enabled
+ for k in other if k in self)
+
+
+ def constrain(self, other, other_is_concrete):
+ if other_is_concrete:
+ for k in self:
+ if k not in other:
+ raise UnsatisfiableVariantSpecError(self[k], '')
+
+ for k in other:
+ if k in self:
+ if self[k].enabled != other[k].enabled:
+ raise UnsatisfiableVariantSpecError(self[k], other[k])
+ else:
+ self[k] = other[k].copy()
def __str__(self):
@@ -375,7 +393,7 @@ def _add_variant(self, name, enabled):
"""Called by the parser to add a variant."""
if name in self.variants: raise DuplicateVariantError(
"Cannot specify variant '%s' twice" % name)
- self.variants[name] = Variant(name, enabled)
+ self.variants[name] = VariantSpec(name, enabled)
def _set_compiler(self, compiler):
@@ -607,6 +625,7 @@ def _concretize_helper(self, presets=None, visited=None):
spack.concretizer.concretize_architecture(self)
spack.concretizer.concretize_compiler(self)
spack.concretizer.concretize_version(self)
+ spack.concretizer.concretize_variants(self)
presets[self.name] = self
visited.add(self.name)
@@ -789,8 +808,7 @@ def _normalize_helper(self, visited, spec_deps, provider_index):
else:
required = index.providers_for(vspec.name)
if required:
- raise UnsatisfiableProviderSpecError(
- required[0], pkg_dep)
+ raise UnsatisfiableProviderSpecError(required[0], pkg_dep)
provider_index.update(pkg_dep)
if name not in spec_deps:
@@ -929,7 +947,7 @@ def constrain(self, other, **kwargs):
self.compiler = other.compiler
self.versions.intersect(other.versions)
- self.variants.update(other.variants)
+ self.variants.constrain(other.variants, other._concrete)
self.architecture = self.architecture or other.architecture
if constrain_deps:
@@ -998,11 +1016,13 @@ def satisfies(self, other, **kwargs):
# All these attrs have satisfies criteria of their own,
# but can be None to indicate no constraints.
for s, o in ((self.versions, other.versions),
- (self.variants, other.variants),
(self.compiler, other.compiler)):
if s and o and not s.satisfies(o):
return False
+ if not self.variants.satisfies(other.variants, self._concrete):
+ return False
+
# Architecture satisfaction is currently just string equality.
# Can be None for unconstrained, though.
if (self.architecture and other.architecture and
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index fb67aa8a8d..ecbc46981c 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -242,12 +242,6 @@ def test_unsatisfiable_compiler_version(self):
self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
- def test_unsatisfiable_variant(self):
- set_pkg_dep('mpileaks', 'mpich+debug')
- spec = Spec('mpileaks ^mpich~debug ^callpath ^dyninst ^libelf ^libdwarf')
- self.assertRaises(spack.spec.UnsatisfiableVariantSpecError, spec.normalize)
-
-
def test_unsatisfiable_architecture(self):
set_pkg_dep('mpileaks', 'mpich=bgqos_0')
spec = Spec('mpileaks ^mpich=sles_10_ppc64 ^callpath ^dyninst ^libelf ^libdwarf')
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 1db7956f04..8614b74c7a 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -33,8 +33,8 @@ class SpecSematicsTest(MockPackagesTest):
# ================================================================================
# Utility functions to set everything up.
# ================================================================================
- def check_satisfies(self, spec, anon_spec):
- left = Spec(spec)
+ def check_satisfies(self, spec, anon_spec, concrete=False):
+ left = Spec(spec, concrete=concrete)
right = parse_anonymous_spec(anon_spec, left.name)
# Satisfies is one-directional.
@@ -46,8 +46,8 @@ def check_satisfies(self, spec, anon_spec):
right.copy().constrain(left)
- def check_unsatisfiable(self, spec, anon_spec):
- left = Spec(spec)
+ def check_unsatisfiable(self, spec, anon_spec, concrete=False):
+ left = Spec(spec, concrete=concrete)
right = parse_anonymous_spec(anon_spec, left.name)
self.assertFalse(left.satisfies(right))
@@ -150,9 +150,33 @@ def test_satisfies_virtual_dependency_versions(self):
self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
- def test_satisfies_variant(self):
- self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
- self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+ def test_satisfies_matching_variant(self):
+ self.check_satisfies('mpich+foo', 'mpich+foo')
+ self.check_satisfies('mpich~foo', 'mpich~foo')
+
+
+ def test_satisfies_unconstrained_variant(self):
+ # only asked for mpich, no constraints. Either will do.
+ self.check_satisfies('mpich+foo', 'mpich')
+ self.check_satisfies('mpich~foo', 'mpich')
+
+
+ def test_unsatisfiable_variants(self):
+ # This case is different depending on whether the specs are concrete.
+
+ # 'mpich' is not concrete:
+ self.check_satisfies('mpich', 'mpich+foo', False)
+ self.check_satisfies('mpich', 'mpich~foo', False)
+
+ # 'mpich' is concrete:
+ self.check_unsatisfiable('mpich', 'mpich+foo', True)
+ self.check_unsatisfiable('mpich', 'mpich~foo', True)
+
+
+ def test_unsatisfiable_variant_mismatch(self):
+        # No match in specs
+ self.check_unsatisfiable('mpich~foo', 'mpich+foo')
+ self.check_unsatisfiable('mpich+foo', 'mpich~foo')
diff --git a/lib/spack/spack/variant.py b/lib/spack/spack/variant.py
new file mode 100644
index 0000000000..3d3e2b0f6d
--- /dev/null
+++ b/lib/spack/spack/variant.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Variant is a class describing flags on builds, or "variants".
+
+Could be generalized later to describe arbitrary parameters, but
+currently variants are just flags.
+
+"""
+
+class Variant(object):
+ """Represents a variant on a build. Can be either on or off."""
+ def __init__(self, default, description):
+ self.default = bool(default)
+ self.description = str(description)
From 793b842f99126fe71ca7f5d98e197b05b868c846 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Mon, 27 Apr 2015 00:45:59 -0700
Subject: [PATCH 15/25] tests for variant concretization
---
lib/spack/spack/spec.py | 42 ++++++++++++++++++++++--------
lib/spack/spack/test/concretize.py | 14 +++++++++-
2 files changed, 44 insertions(+), 12 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 4639aea452..7eb9d42cd1 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -294,8 +294,13 @@ def __str__(self):
class VariantMap(HashableMap):
- def satisfies(self, other, self_is_concrete):
- if self_is_concrete:
+ def __init__(self, spec):
+ super(VariantMap, self).__init__()
+ self.spec = spec
+
+
+ def satisfies(self, other):
+ if self.spec._concrete:
return all(k in self and self[k].enabled == other[k].enabled
for k in other)
else:
@@ -303,8 +308,8 @@ def satisfies(self, other, self_is_concrete):
for k in other if k in self)
- def constrain(self, other, other_is_concrete):
- if other_is_concrete:
+ def constrain(self, other):
+ if other.spec._concrete:
for k in self:
if k not in other:
raise UnsatisfiableVariantSpecError(self[k], '')
@@ -316,6 +321,18 @@ def constrain(self, other, other_is_concrete):
else:
self[k] = other[k].copy()
+ @property
+ def concrete(self):
+ return self.spec._concrete or all(
+ v in self for v in self.spec.package.variants)
+
+
+ def copy(self):
+ clone = VariantMap(None)
+ for name, variant in self.items():
+ clone[name] = variant.copy()
+ return clone
+
def __str__(self):
sorted_keys = sorted(self.keys())
@@ -361,10 +378,11 @@ def __init__(self, spec_like, *dep_like, **kwargs):
self.name = other.name
self.dependents = other.dependents
self.versions = other.versions
- self.variants = other.variants
self.architecture = other.architecture
self.compiler = other.compiler
self.dependencies = other.dependencies
+ self.variants = other.variants
+ self.variants.spec = self
# Specs are by default not assumed to be normal, but in some
# cases we've read them from a file want to assume normal.
@@ -457,14 +475,15 @@ def virtual(self):
@property
def concrete(self):
"""A spec is concrete if it can describe only ONE build of a package.
- If any of the name, version, architecture, compiler, or depdenencies
- are ambiguous,then it is not concrete.
+ If any of the name, version, architecture, compiler,
+        variants, or dependencies are ambiguous, then it is not concrete.
"""
if self._concrete:
return True
self._concrete = bool(not self.virtual
and self.versions.concrete
+ and self.variants.concrete
and self.architecture
and self.compiler and self.compiler.concrete
and self.dependencies.concrete)
@@ -947,7 +966,7 @@ def constrain(self, other, **kwargs):
self.compiler = other.compiler
self.versions.intersect(other.versions)
- self.variants.constrain(other.variants, other._concrete)
+ self.variants.constrain(other.variants)
self.architecture = self.architecture or other.architecture
if constrain_deps:
@@ -1020,7 +1039,7 @@ def satisfies(self, other, **kwargs):
if s and o and not s.satisfies(o):
return False
- if not self.variants.satisfies(other.variants, self._concrete):
+ if not self.variants.satisfies(other.variants):
return False
# Architecture satisfaction is currently just string equality.
@@ -1089,11 +1108,12 @@ def _dup(self, other, **kwargs):
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
- self.variants = other.variants.copy()
self.architecture = other.architecture
self.compiler = other.compiler.copy() if other.compiler else None
self.dependents = DependencyMap()
self.dependencies = DependencyMap()
+ self.variants = other.variants.copy()
+ self.variants.spec = self
# If we copy dependencies, preserve DAG structure in the new spec
if kwargs.get('deps', True):
@@ -1429,7 +1449,7 @@ def spec(self):
spec = Spec.__new__(Spec)
spec.name = self.token.value
spec.versions = VersionList()
- spec.variants = VariantMap()
+ spec.variants = VariantMap(spec)
spec.architecture = None
spec.compiler = None
spec.dependents = DependencyMap()
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index a7f4812c8c..cc839a2340 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -35,7 +35,13 @@ def check_spec(self, abstract, concrete):
self.assertEqual(abstract.versions, concrete.versions)
if abstract.variants:
- self.assertEqual(abstract.versions, concrete.versions)
+ for name in abstract.variants:
+ avariant = abstract.variants[name]
+ cvariant = concrete.variants[name]
+ self.assertEqual(avariant.enabled, cvariant.enabled)
+
+ for name in abstract.package.variants:
+ self.assertTrue(name in concrete.variants)
if abstract.compiler and abstract.compiler.concrete:
self.assertEqual(abstract.compiler, concrete.compiler)
@@ -66,6 +72,12 @@ def test_concretize_dag(self):
self.check_concretize('libelf')
+ def test_concretize_variant(self):
+ self.check_concretize('mpich+debug')
+ self.check_concretize('mpich~debug')
+ self.check_concretize('mpich')
+
+
def test_concretize_with_virtual(self):
self.check_concretize('mpileaks ^mpi')
self.check_concretize('mpileaks ^mpi@:1.1')
From 3c4948742458ed61e5be88006494c1d82a2d8576 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 13:11:03 -0700
Subject: [PATCH 16/25] Doc tweak.
- add LLNL release number to new RTD theme.
---
lib/spack/docs/_themes/sphinx_rtd_theme/footer.html | 6 +++++-
lib/spack/docs/conf.py | 4 ++--
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html b/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
index 6347a440d7..d000dcbc2c 100644
--- a/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
+++ b/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
@@ -22,7 +22,12 @@
{%- endif %}
{%- endif %}
+
+ Written by Todd Gamblin (tgamblin@llnl.gov) and
+ many contributors. LLNL-CODE-647188.
+
{%- if last_updated %}
+
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
@@ -33,4 +38,3 @@
{%- endif %}
-
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index c2b2d0e37c..7303d7fef6 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -94,7 +94,7 @@
# General information about the project.
project = u'Spack'
-copyright = u'2013-2014, Lawrence Livermore National Laboratory'
+copyright = u'2013-2015, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -203,7 +203,7 @@
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+#html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
From 43e546559285621e439d30df974fe9b8d49c5381 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 17:56:27 -0700
Subject: [PATCH 17/25] Fix bug in directory layout hidden_file_paths()
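
The culprit is the classic one-element tuple pitfall: parentheses only
group an expression, so hidden_file_paths returned a bare string and any
caller iterating over it saw individual characters instead of one path.
The trailing comma makes it a real tuple:

    return (self.metadata_dir)    # just the string itself
    return (self.metadata_dir,)   # a one-element tuple, as intended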
---
lib/spack/spack/directory_layout.py | 2 +-
lib/spack/spack/package.py | 5 ++++-
var/spack/packages/python/package.py | 4 +++-
3 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index c2e2ea4deb..fe02fff3b0 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -182,7 +182,7 @@ def __init__(self, root, **kwargs):
@property
def hidden_file_paths(self):
- return (self.metadata_dir)
+ return (self.metadata_dir,)
def relative_path_for_spec(self, spec):
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index b0bb1fb7bc..9ddd55f5c0 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -984,8 +984,10 @@ def do_activate(self, **kwargs):
self._sanity_check_extension()
force = kwargs.get('force', False)
- spack.install_layout.check_extension_conflict(self.extendee_spec, self.spec)
+ spack.install_layout.check_extension_conflict(
+ self.extendee_spec, self.spec)
+ # Activate any package dependencies that are also extensions.
if not force:
for spec in self.spec.traverse(root=False):
if spec.package.extends(self.extendee_spec):
@@ -1016,6 +1018,7 @@ def ignore(filename):
conflict = tree.find_conflict(self.prefix, ignore=ignore)
if conflict:
raise ExtensionConflictError(conflict)
+
tree.merge(self.prefix, ignore=ignore)
diff --git a/var/spack/packages/python/package.py b/var/spack/packages/python/package.py
index 31a12ea653..797900527d 100644
--- a/var/spack/packages/python/package.py
+++ b/var/spack/packages/python/package.py
@@ -139,7 +139,9 @@ def write_easy_install_pth(self, exts):
def activate(self, ext_pkg, **args):
- args.update(ignore=self.python_ignore(ext_pkg, args))
+ ignore=self.python_ignore(ext_pkg, args)
+ args.update(ignore=ignore)
+
super(Python, self).activate(ext_pkg, **args)
exts = spack.install_layout.extension_map(self.spec)
From b4a26c496c403ff976d2df18a9615d29dd07191c Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 18:46:17 -0700
Subject: [PATCH 18/25] Better hash output in find.
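
`-l/--long` is now an ordinary flag instead of a display mode, so hash
prefixes can be combined with the other output modes. Usage sketch (the
flag combinations shown are illustrative):

    spack find -l        # short listing, each spec prefixed with its 7-char gray hash
    spack find -d -l     # full dependency trees, one hash prefix per line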
---
lib/spack/spack/cmd/find.py | 34 +++++++++++++++++++++++-----------
lib/spack/spack/spec.py | 12 ++++++++----
2 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index 70b10edb4e..15c1cc9196 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -40,9 +40,6 @@
def setup_parser(subparser):
format_group = subparser.add_mutually_exclusive_group()
- format_group.add_argument(
- '-l', '--long', action='store_const', dest='mode', const='long',
- help='Show dependency hashes as well as versions.')
format_group.add_argument(
'-p', '--paths', action='store_const', dest='mode', const='paths',
help='Show paths to package install directories')
@@ -50,13 +47,22 @@ def setup_parser(subparser):
'-d', '--deps', action='store_const', dest='mode', const='deps',
help='Show full dependency DAG of installed packages')
+ subparser.add_argument(
+ '-l', '--long', action='store_true', dest='long',
+ help='Show dependency hashes as well as versions.')
+
subparser.add_argument(
'query_specs', nargs=argparse.REMAINDER,
help='optional specs to filter results')
+def gray_hash(spec):
+ return colorize('@K{[%s]}' % spec.dag_hash(7))
+
+
def display_specs(specs, **kwargs):
mode = kwargs.get('mode', 'short')
+ hashes = kwargs.get('long', False)
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
@@ -85,13 +91,20 @@ def display_specs(specs, **kwargs):
elif mode == 'deps':
for spec in specs:
- print spec.tree(indent=4, format='$_$@$+$#', color=True),
+ print spec.tree(
+ format='$_$@$+',
+ color=True,
+ indent=4,
+ prefix=(lambda s: gray_hash(s)) if hashes else None)
- elif mode in ('short', 'long'):
- fmt = '$-_$@$+'
- if mode == 'long':
- fmt += '$#'
- colify(s.format(fmt, color=True) for s in specs)
+ elif mode == 'short':
+ def fmt(s):
+ string = ""
+ if hashes:
+ string += gray_hash(s) + ' '
+ string += s.format('$-_$@$+', color=True)
+ return string
+ colify(fmt(s) for s in specs)
else:
raise ValueError(
@@ -125,5 +138,4 @@ def find(parser, args):
if sys.stdout.isatty():
tty.msg("%d installed packages." % len(specs))
- display_specs(specs, mode=args.mode)
-
+ display_specs(specs, mode=args.mode, long=args.long)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 21e36de14d..5f34a33ac1 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -120,6 +120,7 @@
enabled_variant_color = '@B'
disabled_variant_color = '@r'
dependency_color = '@.'
+hash_color = '@K'
"""This map determines the coloring of specs when using color output.
We make the fields different colors to enhance readability.
@@ -129,7 +130,8 @@
'=' : architecture_color,
'+' : enabled_variant_color,
'~' : disabled_variant_color,
- '^' : dependency_color }
+ '^' : dependency_color,
+ '#' : hash_color }
"""Regex used for splitting by spec field separators."""
_separators = '[%s]' % ''.join(color_formats.keys())
@@ -1296,7 +1298,7 @@ def format(self, format_string='$_$@$%@$+$=', **kwargs):
$%@ Compiler & compiler version
$+ Options
$= Architecture
- $# Dependencies' 8-char sha1 prefix
+ $# 7-char prefix of DAG hash
$$ $
Optionally you can provide a width, e.g. $20_ for a 20-wide name.
@@ -1352,8 +1354,7 @@ def write(s, c):
if self.architecture:
write(fmt % (c + str(self.architecture)), c)
elif c == '#':
- if self.dependencies:
- out.write(fmt % ('-' + self.dag_hash(8)))
+ out.write('-' + fmt % (self.dag_hash(7)))
elif c == '$':
if fmt != '':
raise ValueError("Can't use format width with $$.")
@@ -1399,12 +1400,15 @@ def tree(self, **kwargs):
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', '$_$@$%@$+$=')
+ prefix = kwargs.pop('prefix', None)
check_kwargs(kwargs, self.tree)
out = ""
cur_id = 0
ids = {}
for d, node in self.traverse(order='pre', cover=cover, depth=True):
+ if prefix is not None:
+ out += prefix(node)
out += " " * indent
if depth:
out += "%-4d" % d
From 13ff1a9bf60dff03edadefa0c411600b210ca989 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Sun, 10 May 2015 18:53:09 -0700
Subject: [PATCH 19/25] Remove debug print in link_tree
---
lib/spack/llnl/util/link_tree.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/lib/spack/llnl/util/link_tree.py b/lib/spack/llnl/util/link_tree.py
index db13b80780..583f077b79 100644
--- a/lib/spack/llnl/util/link_tree.py
+++ b/lib/spack/llnl/util/link_tree.py
@@ -54,7 +54,6 @@ def find_conflict(self, dest_root, **kwargs):
"""Returns the first file in dest that conflicts with src"""
kwargs['follow_nonexisting'] = False
for src, dest in traverse_tree(self._root, dest_root, **kwargs):
- print src, dest
if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest):
return dest
From cd5fa128c5d65bc169cec68b4333a67e70dacc4b Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 12 May 2015 09:56:59 -0700
Subject: [PATCH 20/25] Work on SPACK-41: Optional dependencies work for simple
conditions.
- Can depend conditionally based on variant, compiler, arch, deps, etc. (see the
  sketch below)
- normalize() is not iterative yet: no chaining depends_ons
- really need a SAT solver, but iterative will at least handle
simple cases.
- Added "strict" option to Spec.satisfies()
- strict checks that ALL of other's constraints are met (not just
the ones self shares)
- Consider splitting these out into two methods: could_satisfy() and
satisfies()
- didn't do this yet as it would require changing code that uses
satisfies()
- Changed semantics of __contains__ to use strict satisfaction (SPACK-56)
- Added tests for optional dependencies.
- The constrain() method on Specs, compilers, versions, etc. now
returns whether the spec changed as a result of the call.
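
A sketch of what package authors can now write (the package and
dependency names below are made up for illustration):

    class Foo(Package):
        variant('mpi', default=False, description="Build with MPI support")

        depends_on('libelf')                 # unconditional, as before
        depends_on('mpi', when='+mpi')       # only when the variant is enabled
        depends_on('libdwarf', when='%gcc')  # conditions can also key on compiler, arch, ...

With strict satisfaction, ALL of the other spec's constraints must be met,
so for example Spec('foo').satisfies('foo+debug', strict=True) would be
false, since the left side says nothing about the debug variant.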
---
lib/spack/spack/directives.py | 67 +++---
lib/spack/spack/package.py | 43 ----
lib/spack/spack/spec.py | 218 ++++++++++++------
lib/spack/spack/test/__init__.py | 3 +-
lib/spack/spack/test/mock_packages_test.py | 2 +-
lib/spack/spack/test/optional_deps.py | 86 +++++++
lib/spack/spack/test/spec_dag.py | 12 +-
lib/spack/spack/version.py | 32 ++-
var/spack/mock_packages/a/package.py | 12 +
var/spack/mock_packages/b/package.py | 12 +
var/spack/mock_packages/c/package.py | 12 +
var/spack/mock_packages/e/package.py | 12 +
.../optional-dep-test-2/package.py | 18 ++
.../optional-dep-test/package.py | 29 +++
14 files changed, 398 insertions(+), 160 deletions(-)
create mode 100644 lib/spack/spack/test/optional_deps.py
create mode 100644 var/spack/mock_packages/a/package.py
create mode 100644 var/spack/mock_packages/b/package.py
create mode 100644 var/spack/mock_packages/c/package.py
create mode 100644 var/spack/mock_packages/e/package.py
create mode 100644 var/spack/mock_packages/optional-dep-test-2/package.py
create mode 100644 var/spack/mock_packages/optional-dep-test/package.py
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index 5c17fe4044..9297d6dac3 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -115,10 +115,7 @@ class Foo(Package):
"""
- def __init__(self, **kwargs):
- # dict argument allows directives to have storage on the package.
- dicts = kwargs.get('dicts', None)
-
+ def __init__(self, dicts=None):
if isinstance(dicts, basestring):
dicts = (dicts,)
elif type(dicts) not in (list, tuple):
@@ -154,13 +151,14 @@ def wrapped(*args, **kwargs):
return wrapped
-@directive(dicts='versions')
+@directive('versions')
def version(pkg, ver, checksum=None, **kwargs):
"""Adds a version and metadata describing how to fetch it.
Metadata is just stored as a dict in the package's versions
dictionary. Package must turn it into a valid fetch strategy
later.
"""
+ # TODO: checksum vs md5 distinction is confusing -- fix this.
# special case checksum for backward compatibility
if checksum:
kwargs['md5'] = checksum
@@ -169,18 +167,29 @@ def version(pkg, ver, checksum=None, **kwargs):
pkg.versions[Version(ver)] = kwargs
-@directive(dicts='dependencies')
-def depends_on(pkg, *specs):
- """Adds a dependencies local variable in the locals of
- the calling class, based on args. """
- for string in specs:
- for spec in spack.spec.parse(string):
- if pkg.name == spec.name:
- raise CircularReferenceError('depends_on', pkg.name)
- pkg.dependencies[spec.name] = spec
+def _depends_on(pkg, spec, when=None):
+ if when is None:
+ when = pkg.name
+ when_spec = parse_anonymous_spec(when, pkg.name)
+
+ dep_spec = Spec(spec)
+ if pkg.name == dep_spec.name:
+ raise CircularReferenceError('depends_on', pkg.name)
+
+ conditions = pkg.dependencies.setdefault(dep_spec.name, {})
+ if when_spec in conditions:
+ conditions[when_spec].constrain(dep_spec, deps=False)
+ else:
+ conditions[when_spec] = dep_spec
-@directive(dicts=('extendees', 'dependencies'))
+@directive('dependencies')
+def depends_on(pkg, spec, when=None):
+ """Creates a dict of deps with specs defining when they apply."""
+ _depends_on(pkg, spec, when=when)
+
+
+@directive(('extendees', 'dependencies'))
def extends(pkg, spec, **kwargs):
"""Same as depends_on, but dependency is symlinked into parent prefix.
@@ -198,14 +207,12 @@ def extends(pkg, spec, **kwargs):
if pkg.extendees:
raise DirectiveError("Packages can extend at most one other package.")
- spec = Spec(spec)
- if pkg.name == spec.name:
- raise CircularReferenceError('extends', pkg.name)
- pkg.dependencies[spec.name] = spec
- pkg.extendees[spec.name] = (spec, kwargs)
+ when = kwargs.pop('when', pkg.name)
+ _depends_on(pkg, spec, when=when)
+ pkg.extendees[spec] = (Spec(spec), kwargs)
-@directive(dicts='provided')
+@directive('provided')
def provides(pkg, *specs, **kwargs):
"""Allows packages to provide a virtual dependency. If a package provides
'mpi', other packages can declare that they depend on "mpi", and spack
@@ -221,17 +228,17 @@ def provides(pkg, *specs, **kwargs):
pkg.provided[provided_spec] = provider_spec
-@directive(dicts='patches')
-def patch(pkg, url_or_filename, **kwargs):
+@directive('patches')
+def patch(pkg, url_or_filename, level=1, when=None):
"""Packages can declare patches to apply to source. You can
optionally provide a when spec to indicate that a particular
patch should only be applied when the package's spec meets
certain conditions (e.g. a particular version).
"""
- level = kwargs.get('level', 1)
- when = kwargs.get('when', pkg.name)
-
+ if when is None:
+ when = pkg.name
when_spec = parse_anonymous_spec(when, pkg.name)
+
if when_spec not in pkg.patches:
pkg.patches[when_spec] = [Patch(pkg.name, url_or_filename, level)]
else:
@@ -240,13 +247,13 @@ def patch(pkg, url_or_filename, **kwargs):
pkg.patches[when_spec].append(Patch(pkg.name, url_or_filename, level))
-@directive(dicts='variants')
-def variant(pkg, name, **kwargs):
+@directive('variants')
+def variant(pkg, name, default=False, description=""):
"""Define a variant for the package. Packager can specify a default
value (on or off) as well as a text description."""
- default = bool(kwargs.get('default', False))
- description = str(kwargs.get('description', "")).strip()
+ default = bool(default)
+ description = str(description).strip()
if not re.match(spack.spec.identifier_re, name):
raise DirectiveError("Invalid variant name in %s: '%s'" % (pkg.name, name))
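
Taken together, the reworked directives now have explicit signatures and a uniform optional ``when`` spec. A minimal sketch of a package file using them (the package name, URL, checksum, and patch file are made up for illustration):

    from spack import *

    class Libexample(Package):
        """Hypothetical package showing the keyword-style directives."""
        homepage = "http://www.example.com"
        url      = "http://www.example.com/libexample-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')

        variant('shared', default=True, description="Build shared libraries.")

        # Unconditional and conditional dependencies.
        depends_on('libelf')
        depends_on('mpi', when='+shared')

        # Patch applied only for version 1.0, at strip level 2.
        patch('fix-build.patch', level=2, when='@1.0')

        def install(self, spec, prefix):
            pass
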
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index ea3b46088a..452544be49 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -50,7 +50,6 @@
from llnl.util.lang import *
import spack
-import spack.spec
import spack.error
import spack.compilers
import spack.mirror
@@ -540,41 +539,6 @@ def preorder_traversal(self, visited=None, **kwargs):
yield pkg
- def validate_dependencies(self):
- """Ensure that this package and its dependencies all have consistent
- constraints on them.
-
- NOTE that this will NOT find sanity problems through a virtual
- dependency. Virtual deps complicate the problem because we
- don't know in advance which ones conflict with others in the
- dependency DAG. If there's more than one virtual dependency,
- it's a full-on SAT problem, so hold off on this for now.
- The vdeps are actually skipped in preorder_traversal, so see
- that for details.
-
- TODO: investigate validating virtual dependencies.
- """
- # This algorithm just attempts to merge all the constraints on the same
- # package together, loses information about the source of the conflict.
- # What we'd really like to know is exactly which two constraints
- # conflict, but that algorithm is more expensive, so we'll do it
- # the simple, less informative way for now.
- merged = spack.spec.DependencyMap()
-
- try:
- for pkg in self.preorder_traversal():
- for name, spec in pkg.dependencies.iteritems():
- if name not in merged:
- merged[name] = spec.copy()
- else:
- merged[name].constrain(spec)
-
- except spack.spec.UnsatisfiableSpecError, e:
- raise InvalidPackageDependencyError(
- "Package %s has inconsistent dependency constraints: %s"
- % (self.name, e.message))
-
-
def provides(self, vpkg_name):
"""True if this package provides a virtual package with the specified name."""
return vpkg_name in self.provided
@@ -1198,13 +1162,6 @@ def __init__(self, message, long_msg=None):
super(PackageError, self).__init__(message, long_msg)
-class InvalidPackageDependencyError(PackageError):
- """Raised when package specification is inconsistent with requirements of
- its dependencies."""
- def __init__(self, message):
- super(InvalidPackageDependencyError, self).__init__(message)
-
-
class PackageVersionError(PackageError):
"""Raised when a version URL cannot automatically be determined."""
def __init__(self, version):
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index f2625ae596..69b0a70445 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -222,20 +222,24 @@ def _autospec(self, compiler_spec_like):
return CompilerSpec(compiler_spec_like)
- def satisfies(self, other):
+ def satisfies(self, other, strict=False):
other = self._autospec(other)
return (self.name == other.name and
- self.versions.satisfies(other.versions))
+ self.versions.satisfies(other.versions, strict=strict))
def constrain(self, other):
+ """Intersect self's versions with other.
+
+ Return whether the CompilerSpec changed.
+ """
other = self._autospec(other)
# ensure that other will actually constrain this spec.
if not other.satisfies(self):
raise UnsatisfiableCompilerSpecError(other, self)
- self.versions.intersect(other.versions)
+ return self.versions.intersect(other.versions)
@property
@@ -316,8 +320,8 @@ def __init__(self, spec):
self.spec = spec
- def satisfies(self, other):
- if self.spec._concrete:
+ def satisfies(self, other, strict=False):
+ if strict or self.spec._concrete:
return all(k in self and self[k].enabled == other[k].enabled
for k in other)
else:
@@ -326,17 +330,25 @@ def satisfies(self, other):
def constrain(self, other):
+ """Add all variants in other that aren't in self to self.
+
+ Raises an error if any common variants don't match.
+ Return whether the spec changed.
+ """
if other.spec._concrete:
for k in self:
if k not in other:
raise UnsatisfiableVariantSpecError(self[k], '')
+ changed = False
for k in other:
if k in self:
if self[k].enabled != other[k].enabled:
raise UnsatisfiableVariantSpecError(self[k], other[k])
else:
self[k] = other[k].copy()
+                changed = True
+ return changed
@property
def concrete(self):
@@ -867,6 +879,59 @@ def flatten(self):
self._add_dependency(dep)
+ def _evaluate_dependency_conditions(self, name):
+ """Evaluate all the conditions on a dependency with this name.
+
+        If the package depends on it in this configuration, return
+ the dependency. If no conditions are True (and we don't
+ depend on it), return None.
+ """
+ pkg = spack.db.get(self.name)
+ conditions = pkg.dependencies[name]
+
+ # evaluate when specs to figure out constraints on the dependency.
+ dep = None
+ for when_spec, dep_spec in conditions.items():
+ sat = self.satisfies(when_spec, strict=True)
+# print self, "satisfies", when_spec, ":", sat
+ if sat:
+ if dep is None:
+ dep = Spec(name)
+ try:
+ dep.constrain(dep_spec)
+ except UnsatisfiableSpecError, e:
+ e.message = ("Conflicting conditional dependencies on package "
+ "%s for spec %s" % (self.name, self))
+ raise e
+ return dep
+
+
+ def _find_provider(self, vdep, provider_index):
+ """Find provider for a virtual spec in the provider index.
+ Raise an exception if there is a conflicting virtual
+ dependency already in this spec.
+ """
+ assert(vdep.virtual)
+ providers = provider_index.providers_for(vdep)
+
+ # If there is a provider for the vpkg, then use that instead of
+ # the virtual package.
+ if providers:
+ # Can't have multiple providers for the same thing in one spec.
+ if len(providers) > 1:
+ raise MultipleProviderError(vdep, providers)
+ return providers[0]
+ else:
+ # The user might have required something insufficient for
+ # pkg_dep -- so we'll get a conflict. e.g., user asked for
+ # mpi@:1.1 but some package required mpi@2.1:.
+ required = provider_index.providers_for(vdep.name)
+ if len(required) > 1:
+ raise MultipleProviderError(vdep, required)
+ elif required:
+ raise UnsatisfiableProviderSpecError(required[0], vdep)
+
+
def _normalize_helper(self, visited, spec_deps, provider_index):
"""Recursive helper function for _normalize."""
if self.name in visited:
@@ -881,34 +946,22 @@ def _normalize_helper(self, visited, spec_deps, provider_index):
# Combine constraints from package dependencies with
# constraints on the spec's dependencies.
pkg = spack.db.get(self.name)
- for name, pkg_dep in self.package.dependencies.items():
+ for name in pkg.dependencies:
+ # If pkg_dep is None, no conditions matched and we don't depend on this.
+ pkg_dep = self._evaluate_dependency_conditions(name)
+ if not pkg_dep:
+ continue
+
# If it's a virtual dependency, try to find a provider
if pkg_dep.virtual:
- providers = provider_index.providers_for(pkg_dep)
-
- # If there is a provider for the vpkg, then use that instead of
- # the virtual package.
- if providers:
- # Can't have multiple providers for the same thing in one spec.
- if len(providers) > 1:
- raise MultipleProviderError(pkg_dep, providers)
-
- pkg_dep = providers[0]
- name = pkg_dep.name
-
- else:
- # The user might have required something insufficient for
- # pkg_dep -- so we'll get a conflict. e.g., user asked for
- # mpi@:1.1 but some package required mpi@2.1:.
- required = provider_index.providers_for(name)
- if len(required) > 1:
- raise MultipleProviderError(pkg_dep, required)
- elif required:
- raise UnsatisfiableProviderSpecError(
- required[0], pkg_dep)
+ visited.add(pkg_dep.name)
+ provider = self._find_provider(pkg_dep, provider_index)
+ if provider:
+ pkg_dep = provider
+ name = provider.name
else:
- # if it's a real dependency, check whether it provides something
- # already required in the spec.
+ # if it's a real dependency, check whether it provides
+ # something already required in the spec.
index = ProviderIndex([pkg_dep], restrict=True)
for vspec in (v for v in spec_deps.values() if v.virtual):
if index.providers_for(vspec):
@@ -966,19 +1019,14 @@ def normalize(self, **kwargs):
# Ensure first that all packages & compilers in the DAG exist.
self.validate_names()
- # Ensure that the package & dep descriptions are consistent & sane
- if not self.virtual:
- self.package.validate_dependencies()
-
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False)
- # Figure out which of the user-provided deps provide virtual deps.
- # Remove virtual deps that are already provided by something in the spec
- spec_packages = [d.package for d in spec_deps.values() if not d.virtual]
-
+ # Initialize index of virtual dependency providers
index = ProviderIndex(spec_deps.values(), restrict=True)
+ # traverse the package DAG and fill out dependencies according
+ # to package files & their 'when' specs
visited = set()
self._normalize_helper(visited, spec_deps, index)
@@ -986,12 +1034,6 @@ def normalize(self, **kwargs):
# actually deps of this package. Raise an error.
extra = set(spec_deps.keys()).difference(visited)
- # Also subtract out all the packags that provide a needed vpkg
- vdeps = [v for v in self.package.virtual_dependencies()]
-
- vpkg_providers = index.providers_for(*vdeps)
- extra.difference_update(p.name for p in vpkg_providers)
-
# Anything left over is not a valid part of the spec.
if extra:
raise InvalidDependencyException(
@@ -1030,6 +1072,10 @@ def validate_names(self):
def constrain(self, other, **kwargs):
+ """Merge the constraints of other with self.
+
+ Returns True if the spec changed as a result, False if not.
+ """
other = self._autospec(other)
constrain_deps = kwargs.get('deps', True)
@@ -1055,18 +1101,22 @@ def constrain(self, other, **kwargs):
elif self.compiler is None:
self.compiler = other.compiler
- self.versions.intersect(other.versions)
- self.variants.constrain(other.variants)
+ changed = False
+ changed |= self.versions.intersect(other.versions)
+ changed |= self.variants.constrain(other.variants)
+ changed |= bool(self.architecture)
self.architecture = self.architecture or other.architecture
if constrain_deps:
- self._constrain_dependencies(other)
+ changed |= self._constrain_dependencies(other)
+
+ return changed
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
if not self.dependencies or not other.dependencies:
- return
+ return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
@@ -1075,12 +1125,17 @@ def _constrain_dependencies(self, other):
raise UnsatisfiableDependencySpecError(other, self)
# Handle common first-order constraints directly
+ changed = False
for name in self.common_dependencies(other):
- self[name].constrain(other[name], deps=False)
+ changed |= self[name].constrain(other[name], deps=False)
+
# Update with additional constraints from other spec
for name in other.dep_difference(self):
self._add_dependency(other[name].copy())
+ changed = True
+
+ return changed
def common_dependencies(self, other):
@@ -1114,46 +1169,72 @@ def _autospec(self, spec_like):
return parse_anonymous_spec(spec_like, self.name)
- def satisfies(self, other, **kwargs):
+ def satisfies(self, other, deps=True, strict=False):
+ """Determine if this spec satisfies all constraints of another.
+
+ There are two senses for satisfies:
+
+ * `loose` (default): the absence of a constraint in self
+ implies that it *could* be satisfied by other, so we only
+ check that there are no conflicts with other for
+ constraints that this spec actually has.
+
+ * `strict`: strict means that we *must* meet all the
+ constraints specified on other.
+ """
other = self._autospec(other)
- satisfy_deps = kwargs.get('deps', True)
# First thing we care about is whether the name matches
if self.name != other.name:
return False
- # All these attrs have satisfies criteria of their own,
- # but can be None to indicate no constraints.
- for s, o in ((self.versions, other.versions),
- (self.compiler, other.compiler)):
- if s and o and not s.satisfies(o):
+ if self.versions and other.versions:
+ if not self.versions.satisfies(other.versions, strict=strict):
return False
+ elif strict and (self.versions or other.versions):
+ return False
- if not self.variants.satisfies(other.variants):
+ # None indicates no constraints when not strict.
+ if self.compiler and other.compiler:
+ if not self.compiler.satisfies(other.compiler, strict=strict):
+ return False
+ elif strict and (other.compiler and not self.compiler):
+ return False
+
+ if not self.variants.satisfies(other.variants, strict=strict):
return False
# Architecture satisfaction is currently just string equality.
- # Can be None for unconstrained, though.
- if (self.architecture and other.architecture and
- self.architecture != other.architecture):
+ # If not strict, None means unconstrained.
+ if self.architecture and other.architecture:
+ if self.architecture != other.architecture:
+ return False
+ elif strict and (other.architecture and not self.architecture):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
- if satisfy_deps:
- return self.satisfies_dependencies(other)
+ if deps:
+ return self.satisfies_dependencies(other, strict=strict)
else:
return True
- def satisfies_dependencies(self, other):
+ def satisfies_dependencies(self, other, strict=False):
"""This checks constraints on common dependencies against each other."""
- # if either spec doesn't restrict dependencies then both are compatible.
- if not self.dependencies or not other.dependencies:
+ if strict:
+ if other.dependencies and not self.dependencies:
+ return False
+
+ if not all(dep in self.dependencies for dep in other.dependencies):
+ return False
+
+ elif not self.dependencies or not other.dependencies:
+ # if either spec doesn't restrict dependencies then both are compatible.
return True
# Handle first-order constraints directly
for name in self.common_dependencies(other):
- if not self[name].satisfies(other[name]):
+ if not self[name].satisfies(other[name], deps=False):
return False
# For virtual dependencies, we need to dig a little deeper.
@@ -1255,7 +1336,7 @@ def __contains__(self, spec):
"""
spec = self._autospec(spec)
for s in self.traverse():
- if s.satisfies(spec):
+ if s.satisfies(spec, strict=True):
return True
return False
@@ -1411,7 +1492,8 @@ def write(s, c):
elif compiler:
if c == '@':
- if self.compiler and self.compiler.versions:
+ if (self.compiler and self.compiler.versions and
+ self.compiler.versions != _any_version):
write(c + str(self.compiler.versions), '%')
elif c == '$':
escape = True
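
The loose/strict distinction is easiest to see on two specs that differ only in how constrained they are. A rough sketch (the package name is arbitrary; an unversioned spec parses to the open version range ``:``):

    from spack.spec import Spec

    constrained   = Spec('mpileaks@2.3%gcc')
    unconstrained = Spec('mpileaks')

    # Loose: neither spec conflicts with the other, so both directions hold.
    constrained.satisfies(unconstrained)               # True
    unconstrained.satisfies(constrained)               # True

    # Strict: self must actually meet all of other's constraints.
    constrained.satisfies(unconstrained, strict=True)  # True  (nothing to meet)
    unconstrained.satisfies(constrained, strict=True)  # False (misses @2.3%gcc)
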
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index 77c8bd3191..7ff512c370 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -53,7 +53,8 @@
'url_extrapolate',
'cc',
'link_tree',
- 'spec_yaml']
+ 'spec_yaml',
+ 'optional_deps']
def list_tests():
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
index e948376039..09fb9ebe30 100644
--- a/lib/spack/spack/test/mock_packages_test.py
+++ b/lib/spack/spack/test/mock_packages_test.py
@@ -35,7 +35,7 @@ def set_pkg_dep(pkg, spec):
Use this to mock up constraints.
"""
spec = Spec(spec)
- spack.db.get(pkg).dependencies[spec.name] = spec
+ spack.db.get(pkg).dependencies[spec.name] = { Spec(pkg) : spec }
class MockPackagesTest(unittest.TestCase):
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
new file mode 100644
index 0000000000..4d8f86a33e
--- /dev/null
+++ b/lib/spack/spack/test/optional_deps.py
@@ -0,0 +1,86 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import unittest
+
+import spack
+from spack.spec import Spec, CompilerSpec
+from spack.test.mock_packages_test import *
+
+class ConcretizeTest(MockPackagesTest):
+
+ def check_normalize(self, spec_string, expected):
+ spec = Spec(spec_string)
+ spec.normalize()
+ self.assertEqual(spec, expected)
+ self.assertTrue(spec.eq_dag(expected))
+
+
+ def test_normalize_simple_conditionals(self):
+ self.check_normalize('optional-dep-test', Spec('optional-dep-test'))
+ self.check_normalize('optional-dep-test~a', Spec('optional-dep-test~a'))
+
+ self.check_normalize('optional-dep-test+a',
+ Spec('optional-dep-test+a', Spec('a')))
+
+ self.check_normalize('optional-dep-test@1.1',
+ Spec('optional-dep-test@1.1', Spec('b')))
+
+ self.check_normalize('optional-dep-test%intel',
+ Spec('optional-dep-test%intel', Spec('c')))
+
+ self.check_normalize('optional-dep-test%intel@64.1',
+ Spec('optional-dep-test%intel@64.1', Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test%intel@64.1.2',
+ Spec('optional-dep-test%intel@64.1.2', Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test%clang@35',
+ Spec('optional-dep-test%clang@35', Spec('e')))
+
+
+ def test_multiple_conditionals(self):
+ self.check_normalize('optional-dep-test+a@1.1',
+ Spec('optional-dep-test+a@1.1', Spec('a'), Spec('b')))
+
+ self.check_normalize('optional-dep-test+a%intel',
+ Spec('optional-dep-test+a%intel', Spec('a'), Spec('c')))
+
+ self.check_normalize('optional-dep-test@1.1%intel',
+ Spec('optional-dep-test@1.1%intel', Spec('b'), Spec('c')))
+
+ self.check_normalize('optional-dep-test@1.1%intel@64.1.2+a',
+ Spec('optional-dep-test@1.1%intel@64.1.2+a',
+ Spec('b'), Spec('a'), Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test@1.1%clang@36.5+a',
+ Spec('optional-dep-test@1.1%clang@36.5+a',
+ Spec('b'), Spec('a'), Spec('e')))
+
+
+ def test_chained_mpi(self):
+ self.check_normalize('optional-dep-test-2+mpi',
+ Spec('optional-dep-test-2+mpi',
+ Spec('optional-dep-test+mpi',
+ Spec('mpi'))))
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index ecbc46981c..549f829d3e 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -44,8 +44,11 @@ def test_conflicting_package_constraints(self):
set_pkg_dep('callpath', 'mpich@2.0')
spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
- self.assertRaises(spack.package.InvalidPackageDependencyError,
- spec.package.validate_dependencies)
+
+        # TODO: try to do something to show that the issue was with
+ # TODO: the user's input or with package inconsistencies.
+ self.assertRaises(spack.spec.UnsatisfiableVersionSpecError,
+ spec.normalize)
def test_preorder_node_traversal(self):
@@ -140,11 +143,6 @@ def test_postorder_path_traversal(self):
def test_conflicting_spec_constraints(self):
mpileaks = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
- try:
- mpileaks.package.validate_dependencies()
- except spack.package.InvalidPackageDependencyError, e:
- self.fail("validate_dependencies raised an exception: %s"
- % e.message)
# Normalize then add conflicting constraints to the DAG (this is an
# extremely unlikely scenario, but we test for it anyway)
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index 61b1e328ce..35db05e018 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -93,12 +93,12 @@ def check_type(t):
def coerced(method):
"""Decorator that ensures that argument types of a method are coerced."""
@wraps(method)
- def coercing_method(a, b):
+ def coercing_method(a, b, *args, **kwargs):
if type(a) == type(b) or a is None or b is None:
- return method(a, b)
+ return method(a, b, *args, **kwargs)
else:
ca, cb = coerce_versions(a, b)
- return getattr(ca, method.__name__)(cb)
+ return getattr(ca, method.__name__)(cb, *args, **kwargs)
return coercing_method
@@ -607,15 +607,22 @@ def from_dict(dictionary):
@coerced
- def satisfies(self, other):
- """A VersionList satisfies another if some version in the list would
- would satisfy some version in the other list. This uses essentially
- the same algorithm as overlaps() does for VersionList, but it calls
- satisfies() on member Versions and VersionRanges.
+ def satisfies(self, other, strict=False):
+ """A VersionList satisfies another if some version in the list
+ would satisfy some version in the other list. This uses
+ essentially the same algorithm as overlaps() does for
+ VersionList, but it calls satisfies() on member Versions
+ and VersionRanges.
+
+ If strict is specified, this version list must lie entirely
+ *within* the other in order to satisfy it.
"""
if not other or not self:
return False
+ if strict:
+ return self in other
+
s = o = 0
while s < len(self) and o < len(other):
if self[s].satisfies(other[o]):
@@ -652,9 +659,14 @@ def intersection(self, other):
@coerced
def intersect(self, other):
- isection = self.intersection(other)
- self.versions = isection.versions
+        """Intersect this version list with other.
+           Return True if the list changed as a result; False otherwise.
+ """
+ isection = self.intersection(other)
+ changed = (isection.versions != self.versions)
+ self.versions = isection.versions
+ return changed
@coerced
def __contains__(self, other):
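
A small sketch of the new strict and change-reporting semantics, using VersionList directly:

    from spack.version import VersionList

    a = VersionList(['1.2:1.6'])
    b = VersionList(['1.4:2.0'])

    # Loose: some version in a could satisfy some version in b.
    a.satisfies(b)                 # True, the ranges overlap

    # Strict: a must lie entirely within b.
    a.satisfies(b, strict=True)    # False, versions below 1.4 fall outside b

    # intersect() now reports whether the list actually changed.
    a.intersect(b)                 # True,  a is now [1.4:1.6]
    a.intersect(b)                 # False, a second intersect changes nothing
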
diff --git a/var/spack/mock_packages/a/package.py b/var/spack/mock_packages/a/package.py
new file mode 100644
index 0000000000..fa63c08df0
--- /dev/null
+++ b/var/spack/mock_packages/a/package.py
@@ -0,0 +1,12 @@
+from spack import *
+
+class A(Package):
+ """Simple package with no dependencies"""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/a-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+
+ def install(self, spec, prefix):
+ pass
diff --git a/var/spack/mock_packages/b/package.py b/var/spack/mock_packages/b/package.py
new file mode 100644
index 0000000000..cb88aa2157
--- /dev/null
+++ b/var/spack/mock_packages/b/package.py
@@ -0,0 +1,12 @@
+from spack import *
+
+class B(Package):
+ """Simple package with no dependencies"""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/b-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+
+ def install(self, spec, prefix):
+ pass
diff --git a/var/spack/mock_packages/c/package.py b/var/spack/mock_packages/c/package.py
new file mode 100644
index 0000000000..f51b913fa9
--- /dev/null
+++ b/var/spack/mock_packages/c/package.py
@@ -0,0 +1,12 @@
+from spack import *
+
+class C(Package):
+ """Simple package with no dependencies"""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/c-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+
+ def install(self, spec, prefix):
+ pass
diff --git a/var/spack/mock_packages/e/package.py b/var/spack/mock_packages/e/package.py
new file mode 100644
index 0000000000..76c6b64c7f
--- /dev/null
+++ b/var/spack/mock_packages/e/package.py
@@ -0,0 +1,12 @@
+from spack import *
+
+class E(Package):
+ """Simple package with no dependencies"""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/e-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+
+ def install(self, spec, prefix):
+ pass
diff --git a/var/spack/mock_packages/optional-dep-test-2/package.py b/var/spack/mock_packages/optional-dep-test-2/package.py
new file mode 100644
index 0000000000..ef0587588e
--- /dev/null
+++ b/var/spack/mock_packages/optional-dep-test-2/package.py
@@ -0,0 +1,18 @@
+from spack import *
+
+class OptionalDepTest2(Package):
+ """Depends on the optional-dep-test package"""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/optional-dep-test-2-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+
+ variant('odt', default=False)
+ variant('mpi', default=False)
+
+ depends_on('optional-dep-test', when='+odt')
+ depends_on('optional-dep-test+mpi', when='+mpi')
+
+ def install(self, spec, prefix):
+ pass
diff --git a/var/spack/mock_packages/optional-dep-test/package.py b/var/spack/mock_packages/optional-dep-test/package.py
new file mode 100644
index 0000000000..bb57576ca9
--- /dev/null
+++ b/var/spack/mock_packages/optional-dep-test/package.py
@@ -0,0 +1,29 @@
+from spack import *
+
+class OptionalDepTest(Package):
+    """A mock package with a variety of conditional dependencies."""
+
+ homepage = "http://www.example.com"
+ url = "http://www.example.com/optional_dep_test-1.0.tar.gz"
+
+ version('1.0', '0123456789abcdef0123456789abcdef')
+ version('1.1', '0123456789abcdef0123456789abcdef')
+
+ variant('a', default=False)
+ variant('f', default=False)
+ variant('mpi', default=False)
+
+ depends_on('a', when='+a')
+ depends_on('b', when='@1.1')
+ depends_on('c', when='%intel')
+ depends_on('d', when='%intel@64.1')
+ depends_on('e', when='%clang@34:40')
+
+ depends_on('f', when='+f')
+ depends_on('g', when='^f')
+ depends_on('mpi', when='^g')
+
+ depends_on('mpi', when='+mpi')
+
+ def install(self, spec, prefix):
+ pass
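
With the conditional directives, a package's ``dependencies`` attribute is no longer a flat name-to-spec map but a two-level dict: dependency name -> { when-spec : dependency spec }. A quick way to inspect it (a sketch; assumes ``spack.db`` points at these mock packages, as it does under the test harness):

    import spack

    pkg = spack.db.get('optional-dep-test')

    # dependencies: { name : { when_spec : dep_spec } }
    for name, conditions in sorted(pkg.dependencies.items()):
        for when_spec, dep_spec in conditions.items():
            print "%-4s via depends_on('%s', when='%s')" % (name, dep_spec, when_spec)
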
From c44db0133f821a3294dfbbbce40a7254b3e1ed3c Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 12 May 2015 11:45:48 -0700
Subject: [PATCH 21/25] Fix SPACK-41: Optional deps work with complex condition
chains.
---
lib/spack/spack/spec.py | 134 ++++++++++++++++----------
lib/spack/spack/test/optional_deps.py | 9 ++
2 files changed, 94 insertions(+), 49 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 69b0a70445..0fd9b1f5f5 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -893,7 +893,6 @@ def _evaluate_dependency_conditions(self, name):
dep = None
for when_spec, dep_spec in conditions.items():
sat = self.satisfies(when_spec, strict=True)
-# print self, "satisfies", when_spec, ":", sat
if sat:
if dep is None:
dep = Spec(name)
@@ -932,67 +931,104 @@ def _find_provider(self, vdep, provider_index):
raise UnsatisfiableProviderSpecError(required[0], vdep)
+ def _merge_dependency(self, dep, visited, spec_deps, provider_index):
+ """Merge the dependency into this spec.
+
+ This is the core of the normalize() method. There are a few basic steps:
+
+ * If dep is virtual, evaluate whether it corresponds to an
+ existing concrete dependency, and merge if so.
+
+        * If it's a real dependency, check whether it provides something
+          that a virtual dependency already in the spec requires, and
+          merge if so.
+
+ * Finally, if none of the above, merge dependency and its
+ constraints into this spec.
+
+ This method returns True if the spec was changed, False otherwise.
+ """
+ changed = False
+
+ # If it's a virtual dependency, try to find a provider and
+ # merge that.
+ if dep.virtual:
+ visited.add(dep.name)
+ provider = self._find_provider(dep, provider_index)
+ if provider:
+ dep = provider
+
+ else:
+ # if it's a real dependency, check whether it provides
+ # something already required in the spec.
+ index = ProviderIndex([dep], restrict=True)
+ for vspec in (v for v in spec_deps.values() if v.virtual):
+ if index.providers_for(vspec):
+ vspec._replace_with(dep)
+ del spec_deps[vspec.name]
+ changed = True
+ else:
+ required = index.providers_for(vspec.name)
+ if required:
+ raise UnsatisfiableProviderSpecError(required[0], dep)
+ provider_index.update(dep)
+
+ # If the spec isn't already in the set of dependencies, clone
+ # it from the package description.
+ if dep.name not in spec_deps:
+ spec_deps[dep.name] = dep.copy()
+
+ # Constrain package information with spec info
+ try:
+ changed |= spec_deps[dep.name].constrain(dep)
+
+ except UnsatisfiableSpecError, e:
+ e.message = "Invalid spec: '%s'. "
+ e.message += "Package %s requires %s %s, but spec asked for %s"
+ e.message %= (spec_deps[dep.name], dep.name, e.constraint_type,
+ e.required, e.provided)
+ raise e
+
+ # Add merged spec to my deps and recurse
+ dependency = spec_deps[dep.name]
+ if dep.name not in self.dependencies:
+ self._add_dependency(dependency)
+ changed = True
+
+ changed |= dependency._normalize_helper(visited, spec_deps, provider_index)
+ return changed
+
+
def _normalize_helper(self, visited, spec_deps, provider_index):
"""Recursive helper function for _normalize."""
if self.name in visited:
- return
+ return False
visited.add(self.name)
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual:
- return
+ return False
- # Combine constraints from package dependencies with
- # constraints on the spec's dependencies.
- pkg = spack.db.get(self.name)
- for name in pkg.dependencies:
- # If pkg_dep is None, no conditions matched and we don't depend on this.
- pkg_dep = self._evaluate_dependency_conditions(name)
- if not pkg_dep:
- continue
+ # Combine constraints from package deps with constraints from
+ # the spec, until nothing changes.
+ any_change = False
+ changed = True
- # If it's a virtual dependency, try to find a provider
- if pkg_dep.virtual:
- visited.add(pkg_dep.name)
- provider = self._find_provider(pkg_dep, provider_index)
- if provider:
- pkg_dep = provider
- name = provider.name
- else:
- # if it's a real dependency, check whether it provides
- # something already required in the spec.
- index = ProviderIndex([pkg_dep], restrict=True)
- for vspec in (v for v in spec_deps.values() if v.virtual):
- if index.providers_for(vspec):
- vspec._replace_with(pkg_dep)
- del spec_deps[vspec.name]
- else:
- required = index.providers_for(vspec.name)
- if required:
- raise UnsatisfiableProviderSpecError(required[0], pkg_dep)
- provider_index.update(pkg_dep)
+ while changed:
+ changed = False
+ pkg = spack.db.get(self.name)
+ for dep_name in pkg.dependencies:
+ # Do we depend on dep_name? If so pkg_dep is not None.
+ pkg_dep = self._evaluate_dependency_conditions(dep_name)
- if name not in spec_deps:
- # If the spec doesn't reference a dependency that this package
- # needs, then clone it from the package description.
- spec_deps[name] = pkg_dep.copy()
+ # If pkg_dep is a dependency, merge it.
+ if pkg_dep:
+ changed |= self._merge_dependency(
+ pkg_dep, visited, spec_deps, provider_index)
- try:
- # Constrain package information with spec info
- spec_deps[name].constrain(pkg_dep)
+ any_change |= changed
- except UnsatisfiableSpecError, e:
- e.message = "Invalid spec: '%s'. "
- e.message += "Package %s requires %s %s, but spec asked for %s"
- e.message %= (spec_deps[name], name, e.constraint_type,
- e.required, e.provided)
- raise e
-
- # Add merged spec to my deps and recurse
- dependency = spec_deps[name]
- self._add_dependency(dependency)
- dependency._normalize_helper(visited, spec_deps, provider_index)
+ return any_change
def normalize(self, **kwargs):
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
index 4d8f86a33e..669e02f8c9 100644
--- a/lib/spack/spack/test/optional_deps.py
+++ b/lib/spack/spack/test/optional_deps.py
@@ -84,3 +84,12 @@ def test_chained_mpi(self):
Spec('optional-dep-test-2+mpi',
Spec('optional-dep-test+mpi',
Spec('mpi'))))
+
+
+ def test_transitive_chain(self):
+ # Each of these dependencies comes from a conditional
+ # dependency on another. This requires iterating to evaluate
+ # the whole chain.
+ self.check_normalize('optional-dep-test+f',
+ Spec('optional-dep-test+f', Spec('f'), Spec('g'), Spec('mpi')))
+
From 095ff1cb4ab3923ac31fb269df8e390438e492b4 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 12 May 2015 14:52:41 -0700
Subject: [PATCH 22/25] Add debug handler to print a stacktrace on Ctrl-C in
debug mode.
---
bin/spack | 4 +++
lib/spack/spack/util/debug.py | 52 +++++++++++++++++++++++++++++++++++
2 files changed, 56 insertions(+)
create mode 100644 lib/spack/spack/util/debug.py
diff --git a/bin/spack b/bin/spack
index 013acf4db8..7dd3dbabbb 100755
--- a/bin/spack
+++ b/bin/spack
@@ -93,6 +93,10 @@ def main():
tty.set_debug(args.debug)
spack.debug = args.debug
+ if spack.debug:
+ import spack.util.debug as debug
+ debug.register_interrupt_handler()
+
spack.spack_working_dir = working_dir
if args.mock:
from spack.packages import PackageDB
diff --git a/lib/spack/spack/util/debug.py b/lib/spack/spack/util/debug.py
new file mode 100644
index 0000000000..37985eccdd
--- /dev/null
+++ b/lib/spack/spack/util/debug.py
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Debug signal handler: prints a stack trace and enters interpreter.
+
+``register_interrupt_handler()`` enables a ctrl-C handler that prints
+a stack trace and drops the user into an interpreter.
+
+"""
+import os
+import code
+import traceback
+import signal
+
+def debug_handler(sig, frame):
+ """Interrupt running process, and provide a python prompt for
+ interactive debugging."""
+ d = {'_frame':frame} # Allow access to frame object.
+ d.update(frame.f_globals) # Unless shadowed by global
+ d.update(frame.f_locals)
+
+ i = code.InteractiveConsole(d)
+    message = "Signal received: entering Python shell.\nTraceback:\n"
+ message += ''.join(traceback.format_stack(frame))
+ i.interact(message)
+ os._exit(1) # Use os._exit to avoid test harness.
+
+
+def register_interrupt_handler():
+ """Register a handler to print a stack trace and enter an interpreter on Ctrl-C"""
+ signal.signal(signal.SIGINT, debug_handler)
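
A quick way to try the handler outside of the spack driver (just a sketch; in normal use it is registered by bin/spack when debug mode is on, as shown above, and this assumes Spack's lib directory is on sys.path):

    import time
    import spack.util.debug as debug

    debug.register_interrupt_handler()

    # Press Ctrl-C: instead of a KeyboardInterrupt you get the current stack
    # trace and an interactive interpreter with the frame's locals, then exit.
    while True:
        time.sleep(1)
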
From 805122c7895e9498a5330654cb18d3ad7e6a40a1 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 12 May 2015 14:52:46 -0700
Subject: [PATCH 23/25] SPACK-41: bugfix for nonconvergent normalize()
- constrain() wasn't reporting changes properly.
---
lib/spack/spack/spec.py | 11 +++++++----
lib/spack/spack/test/optional_deps.py | 1 -
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 0fd9b1f5f5..4a67614be7 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1014,9 +1014,9 @@ def _normalize_helper(self, visited, spec_deps, provider_index):
any_change = False
changed = True
+ pkg = spack.db.get(self.name)
while changed:
changed = False
- pkg = spack.db.get(self.name)
for dep_name in pkg.dependencies:
# Do we depend on dep_name? If so pkg_dep is not None.
pkg_dep = self._evaluate_dependency_conditions(dep_name)
@@ -1132,16 +1132,19 @@ def constrain(self, other, **kwargs):
raise UnsatisfiableArchitectureSpecError(self.architecture,
other.architecture)
+ changed = False
if self.compiler is not None and other.compiler is not None:
- self.compiler.constrain(other.compiler)
+ changed |= self.compiler.constrain(other.compiler)
elif self.compiler is None:
+ changed |= (self.compiler != other.compiler)
self.compiler = other.compiler
- changed = False
changed |= self.versions.intersect(other.versions)
changed |= self.variants.constrain(other.variants)
- changed |= bool(self.architecture)
+
+ old = self.architecture
self.architecture = self.architecture or other.architecture
+ changed |= (self.architecture != old)
if constrain_deps:
changed |= self._constrain_dependencies(other)
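
The contract this bugfix restores, in isolation: ``constrain()`` should return True exactly when it added something new. A sketch (spec names are arbitrary):

    from spack.spec import Spec

    s = Spec('libelf')
    s.constrain('@1.8.13:1.8.14')   # True:  version range added
    s.constrain('@1.8.13:1.8.14')   # False: same constraint again, no change
    s.constrain('%gcc@4.5')         # True:  compiler constraint added
    s.constrain('+debug')           # True:  variant added
    s.constrain('+debug')           # False: already present
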
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
index 669e02f8c9..265a983f3f 100644
--- a/lib/spack/spack/test/optional_deps.py
+++ b/lib/spack/spack/test/optional_deps.py
@@ -92,4 +92,3 @@ def test_transitive_chain(self):
# the whole chain.
self.check_normalize('optional-dep-test+f',
Spec('optional-dep-test+f', Spec('f'), Spec('g'), Spec('mpi')))
-
From cd1ca36488fb9c9c5e195a830ea9a3a4c7bde760 Mon Sep 17 00:00:00 2001
From: Todd Gamblin
Date: Tue, 12 May 2015 15:48:57 -0700
Subject: [PATCH 24/25] SPACK-41: More tests to ensure that constrain() reports
changes.
---
lib/spack/spack/spec.py | 9 ++--
lib/spack/spack/test/spec_semantics.py | 59 ++++++++++++++++++++++++++
2 files changed, 65 insertions(+), 3 deletions(-)
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 4a67614be7..aa13f0422c 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1107,13 +1107,12 @@ def validate_names(self):
raise UnknownVariantError(spec.name, vname)
- def constrain(self, other, **kwargs):
+ def constrain(self, other, deps=True):
"""Merge the constraints of other with self.
Returns True if the spec changed as a result, False if not.
"""
other = self._autospec(other)
- constrain_deps = kwargs.get('deps', True)
if not self.name == other.name:
raise UnsatisfiableSpecNameError(self.name, other.name)
@@ -1146,7 +1145,7 @@ def constrain(self, other, **kwargs):
self.architecture = self.architecture or other.architecture
changed |= (self.architecture != old)
- if constrain_deps:
+ if deps:
changed |= self._constrain_dependencies(other)
return changed
@@ -1154,6 +1153,8 @@ def constrain(self, other, **kwargs):
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
+ other = self._autospec(other)
+
if not self.dependencies or not other.dependencies:
return False
@@ -1260,6 +1261,8 @@ def satisfies(self, other, deps=True, strict=False):
def satisfies_dependencies(self, other, strict=False):
"""This checks constraints on common dependencies against each other."""
+ other = self._autospec(other)
+
if strict:
if other.dependencies and not self.dependencies:
return False
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 8614b74c7a..20df2603f5 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -64,6 +64,16 @@ def check_constrain(self, expected, spec, constraint):
self.assertEqual(exp, spec)
+ def check_constrain_changed(self, spec, constraint):
+ spec = Spec(spec)
+ self.assertTrue(spec.constrain(constraint))
+
+
+ def check_constrain_not_changed(self, spec, constraint):
+ spec = Spec(spec)
+ self.assertFalse(spec.constrain(constraint))
+
+
def check_invalid_constraint(self, spec, constraint):
spec = Spec(spec)
constraint = Spec(constraint)
@@ -200,6 +210,11 @@ def test_constrain_arch(self):
self.check_constrain('libelf=bgqos_0', 'libelf', 'libelf=bgqos_0')
+ def test_constrain_compiler(self):
+        self.check_constrain('libelf%gcc@4.4.7', 'libelf%gcc@4.4.7', 'libelf%gcc@4.4.7')
+        self.check_constrain('libelf%gcc@4.4.7', 'libelf', 'libelf%gcc@4.4.7')
+
+
def test_invalid_constraint(self):
self.check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3')
self.check_invalid_constraint('libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7')
@@ -208,3 +223,47 @@ def test_invalid_constraint(self):
self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
self.check_invalid_constraint('libelf=bgqos_0', 'libelf=x86_54')
+
+
+ def test_constrain_changed(self):
+ self.check_constrain_changed('libelf', '@1.0')
+ self.check_constrain_changed('libelf', '@1.0:5.0')
+ self.check_constrain_changed('libelf', '%gcc')
+ self.check_constrain_changed('libelf%gcc', '%gcc@4.5')
+ self.check_constrain_changed('libelf', '+debug')
+ self.check_constrain_changed('libelf', '~debug')
+ self.check_constrain_changed('libelf', '=bgqos_0')
+
+
+ def test_constrain_not_changed(self):
+ self.check_constrain_not_changed('libelf', 'libelf')
+ self.check_constrain_not_changed('libelf@1.0', '@1.0')
+ self.check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')
+ self.check_constrain_not_changed('libelf%gcc', '%gcc')
+ self.check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')
+ self.check_constrain_not_changed('libelf+debug', '+debug')
+ self.check_constrain_not_changed('libelf~debug', '~debug')
+ self.check_constrain_not_changed('libelf=bgqos_0', '=bgqos_0')
+ self.check_constrain_not_changed('libelf^foo', 'libelf^foo')
+ self.check_constrain_not_changed('libelf^foo^bar', 'libelf^foo^bar')
+
+
+ def test_constrain_dependency_changed(self):
+ self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
+ self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo+debug')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo~debug')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo=bgqos_0')
+
+
+ def test_constrain_dependency_not_changed(self):
+ self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')
+ self.check_constrain_not_changed('libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0')
+ self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
+ self.check_constrain_not_changed('libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5')
+ self.check_constrain_not_changed('libelf^foo+debug', 'libelf^foo+debug')
+ self.check_constrain_not_changed('libelf^foo~debug', 'libelf^foo~debug')
+ self.check_constrain_not_changed('libelf^foo=bgqos_0', 'libelf^foo=bgqos_0')
+
From 46b91ddf57beb54f05fc6a3cc70283d4b17d1bd3 Mon Sep 17 00:00:00 2001
From: Matthew LeGendre
Date: Mon, 18 May 2015 15:19:20 -0700
Subject: [PATCH 25/25] YAML config files for compilers and mirrors
---
.gitignore | 1 +
lib/spack/spack/cmd/compiler.py | 2 +-
lib/spack/spack/cmd/config.py | 31 +-
lib/spack/spack/cmd/mirror.py | 18 +-
lib/spack/spack/compilers/__init__.py | 43 +-
lib/spack/spack/config.py | 641 +++++++-----------
lib/spack/spack/stage.py | 8 +-
lib/spack/spack/test/config.py | 65 +-
lib/spack/spack/test/mock_packages_test.py | 29 +-
var/spack/mock_configs/site_spackconfig | 12 -
.../site_spackconfig/compilers.yaml | 12 +
var/spack/mock_configs/user_spackconfig | 0
12 files changed, 358 insertions(+), 504 deletions(-)
delete mode 100644 var/spack/mock_configs/site_spackconfig
create mode 100644 var/spack/mock_configs/site_spackconfig/compilers.yaml
delete mode 100644 var/spack/mock_configs/user_spackconfig
diff --git a/.gitignore b/.gitignore
index 828fb04e7d..1c6ca4c99e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
*~
.DS_Store
.idea
+/etc/spack/*
/etc/spackconfig
/share/spack/dotkit
/share/spack/modules
diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py
index e37f44b3b7..2a64dc914e 100644
--- a/lib/spack/spack/cmd/compiler.py
+++ b/lib/spack/spack/cmd/compiler.py
@@ -68,7 +68,7 @@ def compiler_add(args):
spack.compilers.add_compilers_to_config('user', *compilers)
n = len(compilers)
tty.msg("Added %d new compiler%s to %s" % (
- n, 's' if n > 1 else '', spack.config.get_filename('user')))
+ n, 's' if n > 1 else '', spack.config.get_config_scope_filename('user', 'compilers')))
colify(reversed(sorted(c.spec for c in compilers)), indent=4)
else:
tty.msg("Found no new compilers")
diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py
index 283bfc19b9..8c18f88b64 100644
--- a/lib/spack/spack/cmd/config.py
+++ b/lib/spack/spack/cmd/config.py
@@ -43,42 +43,27 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='config_command')
- set_parser = sp.add_parser('set', help='Set configuration values.')
- set_parser.add_argument('key', help="Key to set value for.")
- set_parser.add_argument('value', nargs='?', default=None,
- help="Value to associate with key")
-
- get_parser = sp.add_parser('get', help='Get configuration values.')
- get_parser.add_argument('key', help="Key to get value for.")
+ get_parser = sp.add_parser('get', help='Print configuration values.')
+ get_parser.add_argument('category', help="Configuration category to print.")
edit_parser = sp.add_parser('edit', help='Edit configuration file.')
-
-
-def config_set(args):
- # default scope for writing is 'user'
- if not args.scope:
- args.scope = 'user'
-
- config = spack.config.get_config(args.scope)
- config.set_value(args.key, args.value)
- config.write()
+ edit_parser.add_argument('category', help="Configuration category to edit")
def config_get(args):
- config = spack.config.get_config(args.scope)
- print config.get_value(args.key)
+ spack.config.print_category(args.category)
def config_edit(args):
if not args.scope:
args.scope = 'user'
- config_file = spack.config.get_filename(args.scope)
+ if not args.category:
+ args.category = None
+ config_file = spack.config.get_config_scope_filename(args.scope, args.category)
spack.editor(config_file)
def config(parser, args):
- action = { 'set' : config_set,
- 'get' : config_get,
+ action = { 'get' : config_get,
'edit' : config_edit }
action[args.config_command](args)
-
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 22838e1344..02a1467ee6 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -75,27 +75,22 @@ def mirror_add(args):
if url.startswith('/'):
url = 'file://' + url
- config = spack.config.get_config('user')
- config.set_value('mirror', args.name, 'url', url)
- config.write()
+    mirror_dict = { args.name : url }
+    spack.config.add_to_mirror_config(mirror_dict)
def mirror_remove(args):
"""Remove a mirror by name."""
- config = spack.config.get_config('user')
name = args.name
- if not config.has_named_section('mirror', name):
+ rmd_something = spack.config.remove_from_config('mirrors', name)
+ if not rmd_something:
tty.die("No such mirror: %s" % name)
- config.remove_named_section('mirror', name)
- config.write()
def mirror_list(args):
"""Print out available mirrors to the console."""
- config = spack.config.get_config()
- sec_names = config.get_section_names('mirror')
-
+ sec_names = spack.config.get_mirror_config()
if not sec_names:
tty.msg("No mirrors configured.")
return
@@ -103,8 +98,7 @@ def mirror_list(args):
max_len = max(len(s) for s in sec_names)
fmt = "%%-%ds%%s" % (max_len + 4)
- for name in sec_names:
- val = config.get_value('mirror', name, 'url')
+ for name, val in sec_names.iteritems():
print fmt % (name, val)
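
With the YAML rework, mirror configuration is just a name-to-URL dict under the ``mirrors`` category, and the command above reduces to calls like these (a sketch using the functions referenced here; the mirror name and URL are made up, and writes are assumed to land in the user scope's mirrors.yaml):

    import spack.config

    # Add a mirror entry.
    spack.config.add_to_mirror_config({'local_fs': 'file:///data/spack-mirror'})

    # List all configured mirrors, merged across site and user scopes.
    for name, url in spack.config.get_mirror_config().iteritems():
        print "%-20s %s" % (name, url)
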
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 8cb11c3208..b7b021a1ac 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -60,24 +60,25 @@ def _get_config():
first."""
# If any configuration file has compilers, just stick with the
# ones already configured.
- config = spack.config.get_config()
+ config = spack.config.get_compilers_config()
existing = [spack.spec.CompilerSpec(s)
- for s in config.get_section_names('compiler')]
+ for s in config]
if existing:
return config
compilers = find_compilers(*get_path('PATH'))
- new_compilers = [
- c for c in compilers if c.spec not in existing]
- add_compilers_to_config('user', *new_compilers)
+ add_compilers_to_config('user', *compilers)
# After writing compilers to the user config, return a full config
# from all files.
- return spack.config.get_config(refresh=True)
+ return spack.config.get_compilers_config()
-@memoized
+_cached_default_compiler = None
def default_compiler():
+ global _cached_default_compiler
+ if _cached_default_compiler:
+ return _cached_default_compiler
versions = []
for name in _default_order: # TODO: customize order.
versions = find(name)
@@ -86,7 +87,8 @@ def default_compiler():
if not versions:
raise NoCompilersError()
- return sorted(versions)[-1]
+ _cached_default_compiler = sorted(versions)[-1]
+ return _cached_default_compiler
def find_compilers(*path):
@@ -122,20 +124,18 @@ def find_compilers(*path):
def add_compilers_to_config(scope, *compilers):
- config = spack.config.get_config(scope)
+ compiler_config_tree = {}
for compiler in compilers:
- add_compiler(config, compiler)
- config.write()
+ compiler_entry = {}
+ for c in _required_instance_vars:
+ val = getattr(compiler, c)
+ if not val:
+ val = "None"
+ compiler_entry[c] = val
+ compiler_config_tree[str(compiler.spec)] = compiler_entry
+ spack.config.add_to_compiler_config(compiler_config_tree, scope)
-def add_compiler(config, compiler):
- def setup_field(cspec, name, exe):
- path = exe if exe else "None"
- config.set_value('compiler', cspec, name, path)
-
- for c in _required_instance_vars:
- setup_field(compiler.spec, c, getattr(compiler, c))
-
def supported_compilers():
"""Return a set of names of compilers supported by Spack.
@@ -157,8 +157,7 @@ def all_compilers():
available to build with. These are instances of CompilerSpec.
"""
configuration = _get_config()
- return [spack.spec.CompilerSpec(s)
- for s in configuration.get_section_names('compiler')]
+ return [spack.spec.CompilerSpec(s) for s in configuration]
@_auto_compiler_spec
@@ -176,7 +175,7 @@ def compilers_for_spec(compiler_spec):
config = _get_config()
def get_compiler(cspec):
- items = dict((k,v) for k,v in config.items('compiler "%s"' % cspec))
+ items = config[str(cspec)]
if not all(n in items for n in _required_instance_vars):
raise InvalidCompilerConfigurationError(cspec)
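
Compiler configuration follows the same pattern: each compiler spec maps to a dict of paths keyed by ``_required_instance_vars`` (cc, cxx, f77, fc, as in the compilers.yaml example below). A sketch of reading it back:

    import spack.config

    # e.g. { 'gcc@4.4.7' : { 'cc'  : '/usr/bin/gcc',
    #                        'cxx' : '/usr/bin/g++', ... }, ... }
    compilers = spack.config.get_compilers_config()
    for cspec, paths in compilers.iteritems():
        print "%-16s cc=%s" % (cspec, paths['cc'])
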
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 85ee16a1c2..34dee86473 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -28,452 +28,315 @@
===============================
When Spack runs, it pulls configuration data from several config
-files, much like bash shells. In Spack, there are two configuration
-scopes:
+directories, each of which contains configuration files. In Spack,
+there are two configuration scopes:
1. ``site``: Spack loads site-wide configuration options from
- ``$(prefix)/etc/spackconfig``.
+ ``$(prefix)/etc/spack/``.
2. ``user``: Spack next loads per-user configuration options from
- ~/.spackconfig.
-
-If user options have the same names as site options, the user options
-take precedence.
+ ~/.spack/.
+Spack may read configuration files from both of these locations. When
+configurations conflict, the user config options take precedence over
+the site configurations. Each configuration directory may contain
+several configuration files, such as compilers.yaml or mirrors.yaml.
Configuration file format
===============================
-Configuration files are formatted using .gitconfig syntax, which is
-much like Windows .INI format. This format is implemented by Python's
-ConfigParser class, and it's easy to read and versatile.
+Configuration files are formatted using YAML syntax.
+This format is parsed with the bundled
+PyYAML module, and it's easy to read and versatile.
-The file is divided into sections, like this ``compiler`` section::
+The config files are structured as trees, like this ``compiler`` section::
- [compiler]
- cc = /usr/bin/gcc
+ compilers:
+ chaos_5_x86_64_ib:
+ gcc@4.4.7:
+ cc: /usr/bin/gcc
+ cxx: /usr/bin/g++
+ f77: /usr/bin/gfortran
+ fc: /usr/bin/gfortran
+ bgqos_0:
+ xlc@12.1:
+ cc: /usr/local/bin/mpixlc
+ ...
-In each section there are options (cc), and each option has a value
-(/usr/bin/gcc).
+In this example, entries like ``compilers`` and ``xlc@12.1`` are used to
+categorize entries beneath them in the tree. At the leaves of the tree,
+entries like ``cc`` and ``cxx`` are specified as name/value pairs.
-Borrowing from git, we also allow named sections, e.g.:
+Spack returns these trees as nested dicts. The dict for the above example
+would look like:
- [compiler "gcc@4.7.3"]
- cc = /usr/bin/gcc
+    { 'compilers' :
+        { 'chaos_5_x86_64_ib' :
+            { 'gcc@4.4.7' :
+                { 'cc'  : '/usr/bin/gcc',
+                  'cxx' : '/usr/bin/g++',
+                  'f77' : '/usr/bin/gfortran',
+                  'fc'  : '/usr/bin/gfortran' } },
+          'bgqos_0' :
+            { 'xlc@12.1' :
+                { 'cc' : '/usr/local/bin/mpixlc' } } } }
-This is a compiler section, but it's for the specific compiler,
-``gcc@4.7.3``. ``gcc@4.7.3`` is the name.
-
-
-Keys
-===============================
-
-Together, the section, name, and option, separated by periods, are
-called a ``key``. Keys can be used on the command line to set
-configuration options explicitly (this is also borrowed from git).
-
-For example, to change the C compiler used by gcc@4.7.3, you could do
-this:
-
- spack config compiler.gcc@4.7.3.cc /usr/local/bin/gcc
-
-That will create a named compiler section in the user's .spackconfig
-like the one shown above.
+Some routines, like get_mirror_config and get_compilers_config, may strip
+off the top levels of the tree and return subtrees.
"""
import os
-import re
-import inspect
-import ConfigParser as cp
+import exceptions
+import sys
from external.ordereddict import OrderedDict
from llnl.util.lang import memoized
import spack.error
-__all__ = [
- 'SpackConfigParser', 'get_config', 'SpackConfigurationError',
- 'InvalidConfigurationScopeError', 'InvalidSectionNameError',
- 'ReadOnlySpackConfigError', 'ConfigParserError', 'NoOptionError',
- 'NoSectionError']
+from contextlib import closing
+from external import yaml
+from external.yaml.error import MarkedYAMLError
+import llnl.util.tty as tty
+from llnl.util.filesystem import mkdirp
-_named_section_re = r'([^ ]+) "([^"]+)"'
+_config_sections = {}
+class _ConfigCategory:
+ name = None
+ filename = None
+ merge = True
+ def __init__(self, n, f, m):
+ self.name = n
+ self.filename = f
+ self.merge = m
+ self.files_read_from = []
+ self.result_dict = {}
+ _config_sections[n] = self
+
+_ConfigCategory('compilers', 'compilers.yaml', True)
+_ConfigCategory('mirrors', 'mirrors.yaml', True)
+_ConfigCategory('view', 'views.yaml', True)
+_ConfigCategory('order', 'orders.yaml', True)
"""Names of scopes and their corresponding configuration files."""
-_scopes = OrderedDict({
- 'site' : os.path.join(spack.etc_path, 'spackconfig'),
- 'user' : os.path.expanduser('~/.spackconfig')
-})
+config_scopes = [('site', os.path.join(spack.etc_path, 'spack')),
+ ('user', os.path.expanduser('~/.spack'))]
-_field_regex = r'^([\w-]*)' \
- r'(?:\.(.*(?=.)))?' \
- r'(?:\.([\w-]+))?$'
+_compiler_by_arch = {}
+_read_config_file_result = {}
+def _read_config_file(filename):
+ """Read a given YAML configuration file"""
+ global _read_config_file_result
+ if filename in _read_config_file_result:
+ return _read_config_file_result[filename]
-_section_regex = r'^([\w-]*)\s*' \
- r'\"([^"]*\)\"$'
+ try:
+ with open(filename) as f:
+ ydict = yaml.load(f)
+ except MarkedYAMLError, e:
+ tty.die("Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
+ except exceptions.IOError, e:
+ _read_config_file_result[filename] = None
+ return None
+ _read_config_file_result[filename] = ydict
+ return ydict
-# Cache of configs -- we memoize this for performance.
-_config = {}
+def clear_config_caches():
+ """Clears the caches for configuration files, which will cause them
+ to be re-read upon the next request"""
+ for key,s in _config_sections.iteritems():
+ s.files_read_from = []
+ s.result_dict = {}
+ spack.config._read_config_file_result = {}
+ spack.config._compiler_by_arch = {}
+ spack.compilers._cached_default_compiler = None
-def get_config(scope=None, **kwargs):
- """Get a Spack configuration object, which can be used to set options.
- With no arguments, this returns a SpackConfigParser with config
- options loaded from all config files. This is how client code
- should read Spack configuration options.
+def _merge_dicts(d1, d2):
+ """Recursively merges two configuration trees, with entries
+ in d2 taking precedence over d1"""
+ if not d1:
+ return d2.copy()
+ if not d2:
+ return d1
- Optionally, a scope parameter can be provided. Valid scopes
- are ``site`` and ``user``. If a scope is provided, only the
- options from that scope's configuration file are loaded. The
- caller can set or unset options, then call ``write()`` on the
- config object to write it back out to the original config file.
+ for key2, val2 in d2.iteritems():
+ if not key2 in d1:
+ d1[key2] = val2
+ continue
+ val1 = d1[key2]
+ if isinstance(val1, dict) and isinstance(val2, dict):
+ d1[key2] = _merge_dicts(val1, val2)
+ continue
+ if isinstance(val1, list) and isinstance(val2, list):
+ val1.extend(val2)
+ seen = set()
+ d1[key2] = [ x for x in val1 if not (x in seen or seen.add(x)) ]
+ continue
+ d1[key2] = val2
+ return d1
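+
+# A minimal illustration of the merge rule above (hypothetical values):
+#   _merge_dicts({'a' : {'x' : 1}, 'b' : [1]}, {'a' : {'y' : 2}, 'b' : [1, 2]})
+#   ==> {'a' : {'x' : 1, 'y' : 2}, 'b' : [1, 2]}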
- By default, this will cache configurations and return the last
- read version of the config file. If the config file is
- modified and you need to refresh, call get_config with the
- refresh=True keyword argument. This will force all files to be
- re-read.
- """
- refresh = kwargs.get('refresh', False)
- if refresh:
- _config.clear()
- if scope not in _config:
- if scope is None:
- _config[scope] = SpackConfigParser([path for path in _scopes.values()])
- elif scope not in _scopes:
- raise UnknownConfigurationScopeError(scope)
+def get_config(category_name):
+ """Get the confguration tree for the names category. Strips off the
+ top-level category entry from the dict"""
+ global config_scopes
+ category = _config_sections[category_name]
+ if category.result_dict:
+ return category.result_dict
+
+ category.result_dict = {}
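+    # Scopes are merged in the order they appear in config_scopes, so entries
+    # read from later scopes (e.g. 'user') override earlier ones (e.g. 'site').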
+ for scope, scope_path in config_scopes:
+ path = os.path.join(scope_path, category.filename)
+ result = _read_config_file(path)
+ if not result:
+ continue
+ if not category_name in result:
+ continue
+ category.files_read_from.insert(0, path)
+ result = result[category_name]
+ if category.merge:
+ category.result_dict = _merge_dicts(category.result_dict, result)
else:
- _config[scope] = SpackConfigParser(_scopes[scope])
-
- return _config[scope]
+ category.result_dict = result
+ return category.result_dict
-def get_filename(scope):
- """Get the filename for a particular config scope."""
- if not scope in _scopes:
- raise UnknownConfigurationScopeError(scope)
- return _scopes[scope]
+def get_compilers_config(arch=None):
+ """Get the compiler configuration from config files for the given
+ architecture. Strips off the architecture component of the
+ configuration"""
+ global _compiler_by_arch
+ if not arch:
+ arch = spack.architecture.sys_type()
+ if arch in _compiler_by_arch:
+ return _compiler_by_arch[arch]
-
-def _parse_key(key):
- """Return the section, name, and option the field describes.
- Values are returned in a 3-tuple.
-
- e.g.:
- The field name ``compiler.gcc@4.7.3.cc`` refers to the 'cc' key
- in a section that looks like this:
-
- [compiler "gcc@4.7.3"]
- cc = /usr/local/bin/gcc
-
- * The section is ``compiler``
- * The name is ``gcc@4.7.3``
- * The key is ``cc``
- """
- match = re.search(_field_regex, key)
- if match:
- return match.groups()
+ cc_config = get_config('compilers')
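+    # Combine the architecture-specific entries with those under 'all';
+    # _merge_dicts gives its second argument precedence on conflicts.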
+ if arch in cc_config and 'all' in cc_config:
+ arch_compiler = dict(cc_config[arch])
+        _compiler_by_arch[arch] = _merge_dicts(arch_compiler, cc_config['all'])
+ elif arch in cc_config:
+ _compiler_by_arch[arch] = cc_config[arch]
+ elif 'all' in cc_config:
+ _compiler_by_arch[arch] = cc_config['all']
else:
- raise InvalidSectionNameError(key)
+ _compiler_by_arch[arch] = {}
+ return _compiler_by_arch[arch]
-def _make_section_name(section, name):
- if not name:
- return section
- return '%s "%s"' % (section, name)
+def get_mirror_config():
+ """Get the mirror configuration from config files"""
+ return get_config('mirrors')
-def _autokey(fun):
- """Allow a function to be called with a string key like
- 'compiler.gcc.cc', or with the section, name, and option
- separated. Function should take at least three args, e.g.:
-
- fun(self, section, name, option, [...])
-
- This will allow the function above to be called normally or
- with a string key, e.g.:
-
- fun(self, key, [...])
- """
- argspec = inspect.getargspec(fun)
- fun_nargs = len(argspec[0])
-
- def string_key_func(*args):
- nargs = len(args)
- if nargs == fun_nargs - 2:
- section, name, option = _parse_key(args[1])
- return fun(args[0], section, name, option, *args[2:])
-
- elif nargs == fun_nargs:
- return fun(*args)
-
- else:
- raise TypeError(
- "%s takes %d or %d args (found %d)."
- % (fun.__name__, fun_nargs - 2, fun_nargs, len(args)))
- return string_key_func
+def get_config_scope_dirname(scope):
+ """For a scope return the config directory"""
+ global config_scopes
+ for s,p in config_scopes:
+ if s == scope:
+ return p
+ tty.die("Unknown scope %s. Valid options are %s" %
+ (scope, ", ".join([s for s,p in config_scopes])))
-
-class SpackConfigParser(cp.RawConfigParser):
- """Slightly modified from Python's raw config file parser to accept
- leading whitespace and preserve comments.
- """
- # Slightly modify Python option expressions to allow leading whitespace
- OPTCRE = re.compile(r'\s*' + cp.RawConfigParser.OPTCRE.pattern)
-
- def __init__(self, file_or_files):
- cp.RawConfigParser.__init__(self, dict_type=OrderedDict)
-
- if isinstance(file_or_files, basestring):
- self.read([file_or_files])
- self.filename = file_or_files
-
- else:
- self.read(file_or_files)
- self.filename = None
+def get_config_scope_filename(scope, category_name):
+ """For some scope and category, get the name of the configuration file"""
+ if not category_name in _config_sections:
+ tty.die("Unknown config category %s. Valid options are: %s" %
+ (category_name, ", ".join([s for s in _config_sections])))
+ return os.path.join(get_config_scope_dirname(scope), _config_sections[category_name].filename)
- @_autokey
- def set_value(self, section, name, option, value):
- """Set the value for a key. If the key is in a section or named
- section that does not yet exist, add that section.
- """
- sn = _make_section_name(section, name)
- if not self.has_section(sn):
- self.add_section(sn)
-
- # Allow valueless config options to be set like this:
- # spack config set mirror https://foo.bar.com
- #
- # Instead of this, which parses incorrectly:
- # spack config set mirror.https://foo.bar.com
- #
- if option is None:
- option = value
- value = None
-
- self.set(sn, option, value)
-
-
- @_autokey
- def get_value(self, section, name, option):
- """Get the value for a key. Raises NoOptionError or NoSectionError if
- the key is not present."""
- sn = _make_section_name(section, name)
+def add_to_config(category_name, addition_dict, scope=None):
+ """Merge a new dict into a configuration tree and write the new
+ configuration to disk"""
+ global _read_config_file_result
+ get_config(category_name)
+ category = _config_sections[category_name]
+    # If scope is specified, use it. Otherwise use the last config scope that
+    # we successfully parsed data from.
+ file = None
+ path = None
+ if not scope and not category.files_read_from:
+ scope = 'user'
+ if scope:
try:
- if not option:
- # TODO: format this better
- return self.items(sn)
+ dir = get_config_scope_dirname(scope)
+ if not os.path.exists(dir):
+ mkdirp(dir)
+ path = os.path.join(dir, category.filename)
+ file = open(path, 'w')
+ except exceptions.IOError, e:
+ pass
+ else:
+ for p in category.files_read_from:
+ try:
+ file = open(p, 'w')
+ except exceptions.IOError, e:
+ pass
+            if file:
+                path = p
+                break
+ if not file:
+ tty.die('Unable to write to config file %s' % path)
- return self.get(sn, option)
+    # Merge the new information into the existing file info, then write to disk
+ new_dict = _read_config_file_result[path]
+ if new_dict and category_name in new_dict:
+ new_dict = new_dict[category_name]
+ new_dict = _merge_dicts(new_dict, addition_dict)
+ new_dict = { category_name : new_dict }
+ _read_config_file_result[path] = new_dict
+ yaml.dump(new_dict, stream=file, default_flow_style=False)
+ file.close()
- # Wrap ConfigParser exceptions in SpackExceptions
- except cp.NoOptionError, e: raise NoOptionError(e)
- except cp.NoSectionError, e: raise NoSectionError(e)
- except cp.Error, e: raise ConfigParserError(e)
+    # Merge the new information into the cached results
+ category.result_dict = _merge_dicts(category.result_dict, addition_dict)
- @_autokey
- def has_value(self, section, name, option):
- """Return whether the configuration file has a value for a
- particular key."""
- sn = _make_section_name(section, name)
- return self.has_option(sn, option)
+def add_to_mirror_config(addition_dict, scope=None):
+ """Add mirrors to the configuration files"""
+ add_to_config('mirrors', addition_dict, scope)
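+
+# For example, registering one mirror (hypothetical name and URL):
+#   add_to_mirror_config({'my-mirror' : 'https://example.com/spack-mirror'})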
- def has_named_section(self, section, name):
- sn = _make_section_name(section, name)
- return self.has_section(sn)
+def add_to_compiler_config(addition_dict, scope=None, arch=None):
+ """Add compilerss to the configuration files"""
+ if not arch:
+ arch = spack.architecture.sys_type()
+ add_to_config('compilers', { arch : addition_dict }, scope)
+ clear_config_caches()
- def remove_named_section(self, section, name):
- sn = _make_section_name(section, name)
- self.remove_section(sn)
+def remove_from_config(category_name, key_to_rm, scope=None):
+ """Remove a configuration key and write a new configuration to disk"""
+ global config_scopes
+ get_config(category_name)
+ scopes_to_rm_from = [scope] if scope else [s for s,p in config_scopes]
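+    # With no explicit scope, attempt to remove the key from every scope's file.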
+ category = _config_sections[category_name]
+
+ rmd_something = False
+ for s in scopes_to_rm_from:
+        path = get_config_scope_filename(s, category_name)
+ result = _read_config_file(path)
+ if not result:
+ continue
+        if not category_name in result or not key_to_rm in result[category_name]:
+ continue
+ with closing(open(path, 'w')) as f:
+ result[category_name].pop(key_to_rm, None)
+ yaml.dump(result, stream=f, default_flow_style=False)
+ category.result_dict.pop(key_to_rm, None)
+ rmd_something = True
+ return rmd_something
- def get_section_names(self, sectype):
- """Get all named sections with the specified type.
- A named section looks like this:
+"""Print a configuration to stdout"""
+def print_category(category_name):
+ if not category_name in _config_sections:
+ tty.die("Unknown config category %s. Valid options are: %s" %
+ (category_name, ", ".join([s for s in _config_sections])))
+ yaml.dump(get_config(category_name), stream=sys.stdout, default_flow_style=False)
- [compiler "gcc@4.7"]
-
- Names of sections are returned as a list, e.g.:
-
- ['gcc@4.7', 'intel@12.3', 'pgi@4.2']
-
- You can get items in the sections like this:
- """
- sections = []
- for secname in self.sections():
- match = re.match(_named_section_re, secname)
- if match:
- t, name = match.groups()
- if t == sectype:
- sections.append(name)
- return sections
-
-
- def write(self, path_or_fp=None):
- """Write this configuration out to a file.
-
- If called with no arguments, this will write the
- configuration out to the file from which it was read. If
- this config was read from multiple files, e.g. site
- configuration and then user configuration, write will
- simply raise an error.
-
- If called with a path or file object, this will write the
- configuration out to the supplied path or file object.
- """
- if path_or_fp is None:
- if not self.filename:
- raise ReadOnlySpackConfigError()
- path_or_fp = self.filename
-
- if isinstance(path_or_fp, basestring):
- path_or_fp = open(path_or_fp, 'w')
-
- self._write(path_or_fp)
-
-
- def _read(self, fp, fpname):
- """This is a copy of Python 2.6's _read() method, with support for
- continuation lines removed."""
- cursect = None # None, or a dictionary
- optname = None
- comment = 0
- lineno = 0
- e = None # None, or an exception
- while True:
- line = fp.readline()
- if not line:
- break
- lineno = lineno + 1
- # comment or blank line?
- if ((line.strip() == '' or line[0] in '#;') or
- (line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR")):
- self._sections["comment-%d" % comment] = line
- comment += 1
- # a section header or option header?
- else:
- # is it a section header?
- mo = self.SECTCRE.match(line)
- if mo:
- sectname = mo.group('header')
- if sectname in self._sections:
- cursect = self._sections[sectname]
- elif sectname == cp.DEFAULTSECT:
- cursect = self._defaults
- else:
- cursect = self._dict()
- cursect['__name__'] = sectname
- self._sections[sectname] = cursect
- # So sections can't start with a continuation line
- optname = None
- # no section header in the file?
- elif cursect is None:
- raise cp.MissingSectionHeaderError(fpname, lineno, line)
- # an option line?
- else:
- mo = self.OPTCRE.match(line)
- if mo:
- optname, vi, optval = mo.group('option', 'vi', 'value')
- if vi in ('=', ':') and ';' in optval:
- # ';' is a comment delimiter only if it follows
- # a spacing character
- pos = optval.find(';')
- if pos != -1 and optval[pos-1].isspace():
- optval = optval[:pos]
- optval = optval.strip()
- # allow empty values
- if optval == '""':
- optval = ''
- optname = self.optionxform(optname.rstrip())
- cursect[optname] = optval
- else:
- # a non-fatal parsing error occurred. set up the
- # exception but keep going. the exception will be
- # raised at the end of the file and will contain a
- # list of all bogus lines
- if not e:
- e = cp.ParsingError(fpname)
- e.append(lineno, repr(line))
- # if any parsing errors occurred, raise an exception
- if e:
- raise e
-
-
-
-
- def _write(self, fp):
- """Write an .ini-format representation of the configuration state.
-
- This is taken from the default Python 2.6 source. It writes 4
- spaces at the beginning of lines instead of no leading space.
- """
- if self._defaults:
- fp.write("[%s]\n" % cp.DEFAULTSECT)
- for (key, value) in self._defaults.items():
- fp.write(" %s = %s\n" % (key, str(value).replace('\n', '\n\t')))
- fp.write("\n")
-
- for section in self._sections:
- # Handles comments and blank lines.
- if isinstance(self._sections[section], basestring):
- fp.write(self._sections[section])
- continue
-
- else:
- # Allow leading whitespace
- fp.write("[%s]\n" % section)
- for (key, value) in self._sections[section].items():
- if key != "__name__":
- fp.write(" %s = %s\n" %
- (key, str(value).replace('\n', '\n\t')))
-
-
-class SpackConfigurationError(spack.error.SpackError):
- def __init__(self, *args):
- super(SpackConfigurationError, self).__init__(*args)
-
-
-class InvalidConfigurationScopeError(SpackConfigurationError):
- def __init__(self, scope):
- super(InvalidConfigurationScopeError, self).__init__(
- "Invalid configuration scope: '%s'" % scope,
- "Options are: %s" % ", ".join(*_scopes.values()))
-
-
-class InvalidSectionNameError(SpackConfigurationError):
- """Raised when the name for a section is invalid."""
- def __init__(self, name):
- super(InvalidSectionNameError, self).__init__(
- "Invalid section specifier: '%s'" % name)
-
-
-class ReadOnlySpackConfigError(SpackConfigurationError):
- """Raised when user attempts to write to a config read from multiple files."""
- def __init__(self):
- super(ReadOnlySpackConfigError, self).__init__(
- "Can only write to a single-file SpackConfigParser")
-
-
-class ConfigParserError(SpackConfigurationError):
- """Wrapper for the Python ConfigParser's errors"""
- def __init__(self, error):
- super(ConfigParserError, self).__init__(str(error))
- self.error = error
-
-
-class NoOptionError(ConfigParserError):
- """Wrapper for ConfigParser NoOptionError"""
- def __init__(self, error):
- super(NoOptionError, self).__init__(error)
-
-
-class NoSectionError(ConfigParserError):
- """Wrapper for ConfigParser NoOptionError"""
- def __init__(self, error):
- super(NoSectionError, self).__init__(error)
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index d451743508..008c5f0429 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -344,13 +344,9 @@ def destroy(self):
def _get_mirrors():
"""Get mirrors from spack configuration."""
- config = spack.config.get_config()
+ config = spack.config.get_mirror_config()
+ return [val for name, val in config.iteritems()]
- mirrors = []
- sec_names = config.get_section_names('mirror')
- for name in sec_names:
- mirrors.append(config.get_value('mirror', name, 'url'))
- return mirrors
def ensure_access(file=spack.stage_path):
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index c676e9a35b..790b22f3b0 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -26,44 +26,49 @@
import shutil
import os
from tempfile import mkdtemp
+import spack
+from spack.packages import PackageDB
+from spack.test.mock_packages_test import *
-from spack.config import *
+class ConfigTest(MockPackagesTest):
+ def setUp(self):
+ self.initmock()
+ self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
+ spack.config.config_scopes = [('test_low_priority', os.path.join(self.tmp_dir, 'low')),
+ ('test_high_priority', os.path.join(self.tmp_dir, 'high'))]
-class ConfigTest(unittest.TestCase):
+ def tearDown(self):
+ self.cleanmock()
+ shutil.rmtree(self.tmp_dir, True)
- @classmethod
- def setUp(cls):
- cls.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
-
-
- @classmethod
- def tearDown(cls):
- shutil.rmtree(cls.tmp_dir, True)
-
-
- def get_path(self):
- return os.path.join(ConfigTest.tmp_dir, "spackconfig")
+ def check_config(self, comps):
+ config = spack.config.get_compilers_config()
+ compiler_list = ['cc', 'cxx', 'f77', 'f90']
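+        # Entries recorded as '/bad' come from the lower-priority scope and are
+        # expected to be overridden, so they are not checked here.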
+ for key in comps:
+ for c in compiler_list:
+ if comps[key][c] == '/bad':
+ continue
+ self.assertEqual(comps[key][c], config[key][c])
def test_write_key(self):
- config = SpackConfigParser(self.get_path())
- config.set_value('compiler.cc', 'a')
- config.set_value('compiler.cxx', 'b')
- config.set_value('compiler', 'gcc@4.7.3', 'cc', 'c')
- config.set_value('compiler', 'gcc@4.7.3', 'cxx', 'd')
- config.write()
+ a_comps = {"gcc@4.7.3" : { "cc" : "/gcc473", "cxx" : "/g++473", "f77" : None, "f90" : None },
+ "gcc@4.5.0" : { "cc" : "/gcc450", "cxx" : "/g++450", "f77" : "/gfortran", "f90" : "/gfortran" },
+ "clang@3.3" : { "cc" : "/bad", "cxx" : "/bad", "f77" : "/bad", "f90" : "/bad" }}
- config = SpackConfigParser(self.get_path())
+ b_comps = {"icc@10.0" : { "cc" : "/icc100", "cxx" : "/icc100", "f77" : None, "f90" : None },
+ "icc@11.1" : { "cc" : "/icc111", "cxx" : "/icp111", "f77" : "/ifort", "f90" : "/ifort" },
+ "clang@3.3" : { "cc" : "/clang", "cxx" : "/clang++", "f77" : None, "f90" : None}}
- self.assertEqual(config.get_value('compiler.cc'), 'a')
- self.assertEqual(config.get_value('compiler.cxx'), 'b')
- self.assertEqual(config.get_value('compiler', 'gcc@4.7.3', 'cc'), 'c')
- self.assertEqual(config.get_value('compiler', 'gcc@4.7.3', 'cxx'), 'd')
+ spack.config.add_to_compiler_config(a_comps, 'test_low_priority')
+ spack.config.add_to_compiler_config(b_comps, 'test_high_priority')
- self.assertEqual(config.get_value('compiler', None, 'cc'), 'a')
- self.assertEqual(config.get_value('compiler', None, 'cxx'), 'b')
- self.assertEqual(config.get_value('compiler.gcc@4.7.3.cc'), 'c')
- self.assertEqual(config.get_value('compiler.gcc@4.7.3.cxx'), 'd')
+ self.check_config(a_comps)
+ self.check_config(b_comps)
+
+ spack.config.clear_config_caches()
+
+ self.check_config(a_comps)
+ self.check_config(b_comps)
- self.assertRaises(NoOptionError, config.get_value, 'compiler', None, 'fc')
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
index 09fb9ebe30..00f81114af 100644
--- a/lib/spack/spack/test/mock_packages_test.py
+++ b/lib/spack/spack/test/mock_packages_test.py
@@ -31,7 +31,7 @@
def set_pkg_dep(pkg, spec):
- """Alters dependence information for a pacakge.
+ """Alters dependence information for a package.
Use this to mock up constraints.
"""
spec = Spec(spec)
@@ -39,21 +39,32 @@ def set_pkg_dep(pkg, spec):
class MockPackagesTest(unittest.TestCase):
- def setUp(self):
+ def initmock(self):
# Use the mock packages database for these tests. This allows
# us to set up contrived packages that don't interfere with
# real ones.
self.real_db = spack.db
spack.db = PackageDB(spack.mock_packages_path)
- self.real_scopes = spack.config._scopes
- spack.config._scopes = {
- 'site' : spack.mock_site_config,
- 'user' : spack.mock_user_config }
+ spack.config.clear_config_caches()
+ self.real_scopes = spack.config.config_scopes
+ spack.config.config_scopes = [
+ ('site', spack.mock_site_config),
+ ('user', spack.mock_user_config)]
+
+
+ def cleanmock(self):
+ """Restore the real packages path after any test."""
+ spack.db = self.real_db
+ spack.config.config_scopes = self.real_scopes
+ spack.config.clear_config_caches()
+
+
+ def setUp(self):
+ self.initmock()
def tearDown(self):
- """Restore the real packages path after any test."""
- spack.db = self.real_db
- spack.config._scopes = self.real_scopes
+ self.cleanmock()
+
diff --git a/var/spack/mock_configs/site_spackconfig b/var/spack/mock_configs/site_spackconfig
deleted file mode 100644
index 1358720362..0000000000
--- a/var/spack/mock_configs/site_spackconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-[compiler "gcc@4.5.0"]
- cc = /path/to/gcc
- cxx = /path/to/g++
- f77 = /path/to/gfortran
- fc = /path/to/gfortran
-
-[compiler "clang@3.3"]
- cc = /path/to/clang
- cxx = /path/to/clang++
- f77 = None
- fc = None
-
diff --git a/var/spack/mock_configs/site_spackconfig/compilers.yaml b/var/spack/mock_configs/site_spackconfig/compilers.yaml
new file mode 100644
index 0000000000..0a2dc893e2
--- /dev/null
+++ b/var/spack/mock_configs/site_spackconfig/compilers.yaml
@@ -0,0 +1,12 @@
+compilers:
+ all:
+ clang@3.3:
+ cc: /path/to/clang
+ cxx: /path/to/clang++
+ f77: None
+ fc: None
+ gcc@4.5.0:
+ cc: /path/to/gcc
+ cxx: /path/to/g++
+ f77: /path/to/gfortran
+ fc: /path/to/gfortran
diff --git a/var/spack/mock_configs/user_spackconfig b/var/spack/mock_configs/user_spackconfig
deleted file mode 100644
index e69de29bb2..0000000000