diff --git a/docker/cacerts/README.md b/docker/cacerts/README.md
new file mode 100644
index 0000000..ceae86e
--- /dev/null
+++ b/docker/cacerts/README.md
@@ -0,0 +1,13 @@
+This directory contains non-standard CA certificates needed to build the docker
+images.
+
+Failures building the Docker containers defined in ../ due to SSL certificate
+verification errors may be a consequence of your local network's firewall. In
+particular, the firewall may be substituting external site certificates with
+its own, signed by a non-standard CA certificate (chain). If so, you can place
+the necessary certificates into this directory; they will be passed into the
+containers, allowing them to safely connect to those external sites.
+
+Be sure the certificates are in PEM format and have a .crt file extension.
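+A certificate in DER (binary) format can be converted to PEM with openssl, e.g.
+`openssl x509 -inform der -in firewall-ca.der -out firewall-ca.crt` (file names
+are illustrative).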
+
+Do not remove this README file; doing so may cause a Docker build failure.
diff --git a/docker/dockbuild.sh b/docker/dockbuild.sh
index a1b34b5..0793f22 100755
--- a/docker/dockbuild.sh
+++ b/docker/dockbuild.sh
@@ -33,6 +33,11 @@ setup_build
log_intro # record start of build into log
+# install CA certs into containers that can use them
+if { echo $BUILD_IMAGES | grep -qs pymongo; }; then
+ cp_ca_certs_to pymongo
+fi
+
for container in $BUILD_IMAGES; do
echo '+ ' docker build $BUILD_OPTS -t $PACKAGE_NAME/$container $container | logit
docker build $BUILD_OPTS -t $PACKAGE_NAME/$container $container 2>&1 | logit
diff --git a/docker/ejsonschema/Dockerfile b/docker/ejsonschema/Dockerfile
index 05ff5c1..d82179b 100644
--- a/docker/ejsonschema/Dockerfile
+++ b/docker/ejsonschema/Dockerfile
@@ -9,9 +9,9 @@ RUN PYTHON=python3.8 uwsgi --build-plugin "/usr/src/uwsgi/plugins/python python3
RUN update-alternatives --install /usr/lib/uwsgi/plugins/python3_plugin.so \
python_plugin.so /usr/lib/uwsgi/plugins/python38_plugin.so 1
-RUN python -m pip install setuptools --upgrade
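+# (setuptools 66 began rejecting legacy, non-PEP 440 version strings, which can break
+# installation of older pinned dependencies)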
+RUN python -m pip install "setuptools<66.0.0"
RUN python -m pip install json-spec jsonschema==2.4.0 requests \
- pytest==4.6.5 filelock crossrefapi pyyaml
+ pytest==4.6.5 filelock crossrefapi pyyaml jsonpath_ng
RUN python -m pip install --no-dependencies jsonmerge==1.3.0
WORKDIR /root
diff --git a/docker/jqfromsrc/Dockerfile b/docker/jqfromsrc/Dockerfile
index 5b2107e..258e41d 100644
--- a/docker/jqfromsrc/Dockerfile
+++ b/docker/jqfromsrc/Dockerfile
@@ -2,7 +2,7 @@ From oar-metadata/pymongo
RUN apt-get update && \
apt-get install -y libonig-dev curl build-essential libtool zip \
- unzip autoconf git
+ unzip autoconf git bison
RUN pip install pipenv
WORKDIR /root
diff --git a/docker/pymongo/Dockerfile b/docker/pymongo/Dockerfile
index 57b77c6..55d269a 100644
--- a/docker/pymongo/Dockerfile
+++ b/docker/pymongo/Dockerfile
@@ -11,6 +11,11 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.8 1; \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1; \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
RUN locale-gen en_US.UTF-8
+
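+# trust any site-provided CA certs (see cacerts/README.md): update-ca-certificates folds
+# *.crt files from /usr/local/share/ca-certificates into the system bundle, and
+# REQUESTS_CA_BUNDLE points the python requests library at that bundle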
+COPY cacerts/README.md cacerts/*.crt /usr/local/share/ca-certificates/
+RUN update-ca-certificates
+ENV REQUESTS_CA_BUNDLE /etc/ssl/certs/ca-certificates.crt
+
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
diff --git a/docker/pymongo/cacerts/README.md b/docker/pymongo/cacerts/README.md
new file mode 100644
index 0000000..464b529
--- /dev/null
+++ b/docker/pymongo/cacerts/README.md
@@ -0,0 +1,12 @@
+This directory contains non-standard CA certificates needed to build the docker
+images.
+
+Failures building the Docker containers defined in ../ due to SSL certificate
+verification errors may be a consequence of your local network's firewall. In
+particular, the firewall may be substituting external site certificates with
+its own, signed by a non-standard CA certificate (chain). If so, you can place
+the necessary certificates into this directory; they will be passed into the
+containers, allowing them to safely connect to those external sites.
+
+Be sure the certificates are in PEM format and have a .crt file extension.
+
diff --git a/jq/nerdm2datacite.jq b/jq/nerdm2datacite.jq
index 9ed6614..0c1b474 100644
--- a/jq/nerdm2datacite.jq
+++ b/jq/nerdm2datacite.jq
@@ -336,20 +336,25 @@ def make_ispartof_rel:
{
relatedIdentifier: .["@id"] | todoiurl,
relatedIdentifierType: "DOI",
- relationType: "isPartOf"
+ relationType: "IsPartOf"
}
- elif (.location) then
- {
- relatedIdentifier: (.location | todoiurl),
- relationType: "isPartOf"
- } |
- if (.relatedIdentifier | test("^https?://(dx.)?doi.org/")) then
- (.relatedIdentifierType = "DOI")
+ else
+ if ((.location|not) and .["@id"] and (.["@id"] | contains("ark:/88434/"))) then
+ .location = "https://data.nist.gov/od/id/" + .["@id"]
+ end |
+ if (.location) then
+ {
+ relatedIdentifier: (.location | todoiurl),
+ relationType: "IsPartOf"
+ } |
+ if (.relatedIdentifier | test("^https?://(dx.)?doi.org/")) then
+ (.relatedIdentifierType = "DOI")
+ else
+ (.relatedIdentifierType = "URL")
+ end
else
- (.relatedIdentifierType = "URL")
+ empty
end
- else
- empty
end
;
@@ -442,7 +447,7 @@ def resource2datacite:
],
relatedIdentifiers: [
(
- (.isPartOf | make_ispartof_rel),
+ (.isPartOf | if (.) then (.[] | make_ispartof_rel) else empty end),
(.references | if (.) then (.[] | make_ref_rel) else empty end),
(.isReplacedBy | make_obsoletedby_rel)
)
diff --git a/jq/tests/test_nerdm2datacite.jqt b/jq/tests/test_nerdm2datacite.jqt
index acda4b3..be81bc5 100644
--- a/jq/tests/test_nerdm2datacite.jqt
+++ b/jq/tests/test_nerdm2datacite.jqt
@@ -300,34 +300,41 @@ include "nerdm2datacite"; make_formats
#
include "nerdm2datacite"; make_ispartof_rel
{ "@id": "doi:10.18434/spd0fjpek351", "location": "http://dx.doi.org/10.18434/spd0fjpek351", "title": "Hello!"}
-{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "isPartOf" }
+{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "IsPartOf" }
#--------------
# testing make_ispartof_rel
#
include "nerdm2datacite"; make_ispartof_rel
{ "@id": "http://doi.org/10.18434/spd0fjpek351", "location": "http://dx.doi.org/10.18434/spd0fjpek351", "title": "Hello!"}
-{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "isPartOf" }
+{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "IsPartOf" }
#--------------
# testing make_ispartof_rel
#
include "nerdm2datacite"; make_ispartof_rel
{ "location": "http://dx.doi.org/10.18434/spd0fjpek351", "title": "Hello!"}
-{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "isPartOf" }
+{ "relatedIdentifier": "https://doi.org/10.18434/spd0fjpek351", "relatedIdentifierType": "DOI", "relationType": "IsPartOf" }
#--------------
# testing make_ispartof_rel
#
include "nerdm2datacite"; make_ispartof_rel
{ "@id": "ark:/88434/jres0-1", "location": "http://jres.nist.gov/10.18434/spd0fjpek351", "title": "Hello!"}
-{ "relatedIdentifier": "http://jres.nist.gov/10.18434/spd0fjpek351", "relatedIdentifierType": "URL", "relationType": "isPartOf" }
+{ "relatedIdentifier": "http://jres.nist.gov/10.18434/spd0fjpek351", "relatedIdentifierType": "URL", "relationType": "IsPartOf" }
#--------------
-# testing make_ispartof_rel: should return an empty result
+# testing make_ispartof_rel: should return a PDR URL
#
include "nerdm2datacite"; make_ispartof_rel
{ "@id": "ark:/88434/jres0-1", "title": "Hello!"}
+{ "relatedIdentifier": "https://data.nist.gov/od/id/ark:/88434/jres0-1", "relatedIdentifierType": "URL", "relationType": "IsPartOf" }
+
+#--------------
+# testing make_ispartof_rel: should return an empty result (because the ID is unrecognized)
+#
+include "nerdm2datacite"; make_ispartof_rel
+{ "@id": "ark:/88888/jres0-1", "title": "Hello!"}
# Line above must be empty
diff --git a/model/README-NERDm.md b/model/README-NERDm.md
new file mode 100644
index 0000000..b63bec1
--- /dev/null
+++ b/model/README-NERDm.md
@@ -0,0 +1,135 @@
+# The NIST Extensible Resource Data Model (NERDm): JSON schemas for rich description of digital resources
+
+## Overview
+
+The NIST Extensible Resource Data Model (NERDm) is a set of schemas for encoding, in JSON format, metadata
+that describe digital resources. The variety of digital resources it can describe includes not only
+digital data sets and collections, but also software, digital services, web sites and portals, and
+digital twins. It was created to serve as the internal metadata format used by the NIST Public Data
+Repository and Science Portal to drive rich presentations on the web and to enable discovery; however, it
+was also designed to enable programmatic access to resources and their metadata by external users.
+Interoperability was also a key design aim: the schemas are defined using the JSON Schema standard [1, 2,
+3], metadata are encoded as JSON-LD [4, 5], and their semantics are tied to community ontologies, with an
+emphasis on DCAT [6] and the US federal Project Open Data (POD) [7] models. Finally, extensibility is also
+central to its design: the schemas are composed of a central core schema and various extension schemas.
+New extensions to support richer metadata concepts can be added over time without breaking existing
+applications.
+
+### About Validation
+
+Validation is central to NERDm's extensibility model. Consuming applications should be able to choose
+which metadata extensions they care to support and ignore terms and extensions they don't support.
+Furthermore, they should not fail when a NERDm document leverages extensions they don't recognize, even
+when on-the-fly validation is required. To support this flexibility, the NERDm framework allows
+documents to declare what extensions are being used and where. We have developed an optional extension
+to the standard JSON Schema validation (see ejsonschema below) to support flexible validation: while a
+standard JSON Schema validator can validate a NERDm document against the NERDm core schema, our extension
+will validate a NERDm document against any recognized extensions and ignore those that are not
+recognized.
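+
+As a quick sketch (file paths illustrative), the core-schema check that any standard validator
+can perform looks like this in Python; the ejsonschema validate script layers the
+extension-aware behavior described above on top of it:
+
+```python
+import json
+from jsonschema import validate, Draft4Validator   # the core schema targets JSON Schema draft-04
+
+with open("model/nerdm-schema.json") as fd:
+    schema = json.load(fd)
+with open("model/examples/janaf.json") as fd:       # any example NERDm document
+    doc = json.load(fd)
+
+validate(doc, schema, cls=Draft4Validator)          # raises ValidationError if invalid
+```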
+
+### Data Model Summary
+
+The NERDm data model is based around the concept of a resource, semantically equivalent to a schema.org
+Resource, and as in schema.org, there can be different types of resources, such as data sets and
+software. A NERDm document indicates what types the resource qualifies as via the JSON-LD `@type`
+property. All NERDm Resources are described by metadata terms from the core NERDm schema; however,
+different resource types can be described by additional metadata properties (often drawing on particular
+NERDm extension schemas). A Resource can contain Components of various types (including
+DCAT-defined Distributions); these can include downloadable data files, hierarchical data
+collections, links to web sites (like software repositories), software tools, or other NERDm Resources.
+Through the NERDm extension system, domain-specific metadata can be included at either the resource or
+component level. The direct semantic and syntactic connections to the DCAT, POD, and schema.org schemas
+are intended to ensure unambiguous conversion of NERDm documents into those schemas.
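+
+For illustration, a heavily abbreviated resource document might look like the following sketch
+(type and property names follow the core and `pub` schemas; the values are hypothetical):
+
+```json
+{
+    "@type": ["nrdp:PublicDataResource"],
+    "@id": "ark:/88434/mds2-0000",
+    "title": "An example data publication",
+    "components": [
+        {
+            "@type": ["nrdp:DataFile", "dcat:Distribution"],
+            "filepath": "data.csv",
+            "downloadURL": "https://data.nist.gov/od/ds/mds2-0000/data.csv"
+        }
+    ]
+}
+```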
+
+### NERDm Schemas
+
+ * [nerdm-schema.json](nerdm-schema.json) -- the Core NERDm schema that includes definitions of the
+ base `Resource` and `Component` types
+ * [nerdm-pub-schema.json](nerdm-pub-schema.json) -- an extension schema that defines different kinds
+ of resource publications.
+ * [nerdm-rls-schema.json](nerdm-rls-schema.json) -- an extension schema that defines types that help
+ describe different versions or releases of resources.
+ * [nerdm-bib-schema.json](nerdm-bib-schema.json) -- an extension schema that defines types for richer
+ descriptions of bibliographic references. In particular, this enables closer interoperability
+ with DataCite metadata.
+ * [nerdm-agg-schema.json](nerdm-agg-schema.json) -- an extension schema that defines different types of
+ data collections or aggregations that are important to the NIST Public Data Repository.
+ * [nerdm-exp-schema.json](nerdm-exp-schema.json) -- an extension schema that defines types for
+ describing experimental data and their context.
+ * [nerdm-sip-schema.json](nerdm-sip-schema.json) -- an extension schema used by the NIST Public Data
+ Repository to describe a Submission Information Package (SIP).
+
+
+### Status and Future
+
+As of this writing, the Core NERDm schema and its framework stand at version 0.7 and are compatible with
+the "draft-04" version of JSON Schema. Version 1.0 is projected to be released in 2023. In that
+release, the NERDm schemas will be updated to the "draft2020" version of JSON Schema [2, 3]. Other
+improvements will include stronger support for RDF and the Linked Data Platform through its support of
+JSON-LD [5].
+
+## Key Links
+
+ - The NERDm JSON Schema Files:
+
+ https://github.com/usnistgov/oar-metadata/tree/integration/model
+ - This directory contains the latest (and previous) versions of the core NERDm Schema and various
+ extensions. All files with names of the form, "*-schema*.json" are JSON Schema definition files; those
+ that do not include a version in the file name represent the latest versions. The latest version of the
+ core schema is called `nerdm-schema.json`, and schemas with names of the form,
+ "nerdm-[ext]_-schema.json", contain extension schemas. All NERDm schemas here are documented
+ internally, including semantic definitions of all terms.
+
+ - ejsonschema: Software for Validating JSON supporting extension schemas
+
+ https://github.com/usnistgov/ejsonschema
+ - This software repository provides Python software that extends the community software library,
+ python-jsonschema
+ (https://github.com/python-jsonschema/jsonschema)
+ to support NERDm's extension framework. Use the scripts/validate script to validate NERDm
+ documents on the command line. (Type `validate -h` for more information.)
+
+ - Example NERDm Documents
+
+ https://github.com/usnistgov/oar-metadata/tree/integration/model/examples
+ - This folder contains example NERDm documents that illustrate the NERDm data model and use of
+ extension schemas. These can all be validated using the ejsonschema validate script.
+
+ - NERDm Support Software
+
+ https://github.com/usnistgov/oar-metadata
+ - This software repository includes a Python package, `nistoar.nerdm`, that aids in
+ creating and handling NERDm documents. In particular, it includes converters that convert NERDm
+ instances into other formats (like POD, schema.org, DataCite and DCAT). It can also transform NERDm
+ documents conforming to earlier versions of the schemas into the latest versions.
+
+
+## References
+
+[1] JSON Schema Website, URL: https://json-schema.org/
+
+[2] Galiegue, F., Zyp, K., and Court, G. (2013). JSON Schema: core definitions and terminology (draft04),
+IETF Internet-Draft, URL: https://datatracker.ietf.org/doc/html/draft-zyp-json-schema-04
+
+[3] Galiegue, F., Zyp, K., and Court, G. (2013). JSON Schema: interactive and non-interactive validation
+ (draft04), IETF Internet-Draft, URL: https://datatracker.ietf.org/doc/html/draft-fge-json-schema-00
+
+[4] JSON-LD Website, URL: https://json-ld.org/
+
+[5] Sporny, M., Longley, D., Kellogg, G., Lanthaler, M., Champin, P., Lindström, N. (2020) JSON-LD 1.1: A
+ JSON-based Serialization for Linked Data, W3C Recommendation 16 July 2020, URL:
+ https://www.w3.org/TR/json-ld/
+
+[6] Albertoni, R., Browning, D., Cox, S., Gonzalez Beltran, A., Perego, A., Winstanley, P. (2020) Data
+ Catalog Vocabulary (DCAT) - Version 2, W3C Recommendation 04 February 2020, URL:
+ https://www.w3.org/TR/vocab-dcat-2/
+
+[7] United States Government, DCAT-US Schema v1.1 (Project Open Data Metadata Schema), URL:
+ https://resources.data.gov/resources/dcat-us/
+
+[8] McBride, B. (2004). The Resource Description Framework (RDF) and its Vocabulary Description Language
+ RDFS. Handbook on Ontologies, 51-65. https://doi.org/10.1007/978-3-540-24750-0_3
+
+[9] Candan, K. S., Liu, H., & Suvarna, R. (2001). Resource description framework. ACM SIGKDD Explorations
+Newsletter, 3(1), 6-19. https://doi.org/10.1145/507533.507536
\ No newline at end of file
diff --git a/model/README.md b/model/README.md
new file mode 100644
index 0000000..b51b6bc
--- /dev/null
+++ b/model/README.md
@@ -0,0 +1,7 @@
+This folder contains files that define various kinds of data models supported by the oar-metadata software.
+
+For more information...
+
+ * ...about the NERDm Schema Framework, see [README-NERDm.md](README-NERDm.md)
+ * check out NERDm examples in the [examples](examples) subfolder
+
diff --git a/oar-build/_dockbuild.sh b/oar-build/_dockbuild.sh
index f4ba894..88e41d6 100644
--- a/oar-build/_dockbuild.sh
+++ b/oar-build/_dockbuild.sh
@@ -60,6 +60,21 @@ function setup_build {
BUILD_OPTS=`collect_build_opts`
}
+function cp_ca_certs_to {
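+    # copy any CA certificates found in ./cacerts (along with its README.md) into each
+    # named container build directory
+    # usage: cp_ca_certs_to CONTAINER_DIR...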
+ # assuming we are in the docker dir
+ [ \! -d cacerts ] || {
+ crts=`compgen -G 'cacerts/*.crt' || true`
+ [ -z "$crts" ] || {
+ echo "${prog}: installing CA certs from docker/cacerts"
+ for cont in $@; do
+ mkdir -p $cont/cacerts
+ echo '+' cp $crts cacerts/README.md $cont/cacerts
+ cp $crts cacerts/README.md $cont/cacerts
+ done
+ }
+ }
+}
+
function help {
helpfile=$OAR_BUILD_DIR/dockbuild_help.txt
[ -f "$OAR_DOCKER_DIR/dockbuild_help.txt" ] && \
diff --git a/python/nistoar/base/config.py b/python/nistoar/base/config.py
index 3221e98..6a6f7be 100644
--- a/python/nistoar/base/config.py
+++ b/python/nistoar/base/config.py
@@ -7,6 +7,8 @@
from collections.abc import Mapping
from urllib.parse import urlparse
+import jsonpath_ng as jp
+
from . import OARException
oar_home = None
@@ -476,3 +478,26 @@ def lookup_config_server(serverport):
"""
raise NotImplementedError()
+NO_VALUE = NotImplemented   # sentinel distinguishing "no default given" from None
+RAISE = NO_VALUE            # pass as the default to raise KeyError when a path is not found
+def hget_jp(obj: Mapping, path: str, default=None):
+ """
+ return the first value from within a hierarchical dictionary (e.g. JSON or config structure)
+    that corresponds to a given location path. The location path is a JSONPath-compliant string
+ (https://goessner.net/articles/JsonPath/). This function is intended for use with paths that
+ uniquely locate data--i.e. resolve to only one value.
+ :param dict obj: the dictionary to search for a matching value.
+ :param str path: a string indicating the location of the value to return. This should be
+ a JSONPath-compliant string (where the initial "$." is optional)
+    :param default:    the value to return if the path does not resolve to an existing
+                       location; pass ``RAISE`` to have a ``KeyError`` raised instead
+    :raises KeyError: if ``default`` is ``RAISE`` and the path does not resolve to an
+                      existing location.
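+
+    For example (values illustrative)::
+
+        cfg = {"db": {"host": "localhost", "ports": [27017]}}
+        hget_jp(cfg, "db.host")            # -> "localhost"
+        hget_jp(cfg, "db.ports[0]")        # -> 27017
+        hget_jp(cfg, "db.user", "admin")   # -> "admin" (path does not exist)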
+ """
+ try:
+ return jp.parse(path).find(obj)[0].value
+ except IndexError:
+ if default is RAISE:
+ raise KeyError(path)
+ return default
+
+hget = hget_jp
+
diff --git a/python/nistoar/id/versions.py b/python/nistoar/id/versions.py
new file mode 100644
index 0000000..b8545af
--- /dev/null
+++ b/python/nistoar/id/versions.py
@@ -0,0 +1,234 @@
+"""
+Utilities for managing semantic version strings that will be assigned to OAR documents
+when they are released (e.g. published). This module implements the version convention
+used by the OAR framework.
+
+The centerpiece of this module is the :py:class:`Version` class which allows a version string to be
+compared for sorting or to be incremented.
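+
+For example::
+
+    Version("1.9") < Version("1.10")               # True: fields compare numerically
+    sorted(["2.0", "0.1.1", "12.3"], key=Version)  # ['0.1.1', '2.0', '12.3']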
+"""
+import re, typing
+
+__all__ = [ "Version", "OARVersion", "cmp_versions", "cmp_oar_versions" ]
+
+_ver_delim = re.compile(r"[\._]")
+_numeric_ver = re.compile(r"^\d+([\._]\d+)*")
+_proper_ver = re.compile(_numeric_ver.pattern+'$')
+
+class Version(object):
+ """
+ a version class that can facilitate comparisons
+ """
+
+ def _toint(self, field):
+ try:
+ return int(field)
+ except ValueError:
+ return field
+
+ def __init__(self, vers):
+ """
+ convert a version string to a Version instance
+ """
+ self._vs = vers
+ self.fields = [self._toint(n) for n in _ver_delim.split(self._vs)]
+
+ def __str__(self):
+ return self._vs
+
+ def __eq__(self, other):
+ if not isinstance(other, Version):
+ other = Version(other)
+ return self.fields == other.fields
+
+ def __lt__(self, other):
+ if not isinstance(other, Version):
+ other = Version(other)
+ return self.fields < other.fields
+
+ def __le__(self, other):
+ if not isinstance(other, Version):
+ other = Version(other)
+ return self < other or self == other
+
+ def __ge__(self, other):
+ return not (self < other)
+ def __gt__(self, other):
+ return not self.__le__(other)
+ def __ne__(self, other):
+ return not (self == other)
+
+ @classmethod
+ def is_proper_version(cls, vers: str) -> bool:
+ """
+ return true if the given version string is of the form M.M.M... where
+ each M is any non-negative number.
+ """
+ return _proper_ver.match(vers) is not None
+
+def cmp_versions(ver1: str, ver2: str) -> int:
+ """
+ compare two version strings for their order.
+ :return int: -1 if v1 < v2, 0 if v1 = v2, and +1 if v1 > v2
+ """
+ a = Version(ver1)
+ b = Version(ver2)
+ if a < b:
+ return -1
+ elif a == b:
+ return 0
+ return +1
+
+OARVersion = typing.NewType("OARVersion", Version)
+
+class OARVersion(Version):
+ """
+ a Version class that supports OAR document version label conventions.
+
+ The OAR document version conventions are:
+ * contains at least 3 numeric fields
+ * an increment in the third field (from the left) represents a
+ change in only the document's metadata or similar inconsequential
+ change.
+ * an increment in the second field represents a consequential change
+ which may include a change in the attached data products.
+    * an increment in the first field represents a major shift in the
+ scope or aim of the document. Such an increment is usually a choice
+ of the document's authors (as it implies intent) as opposed to
+      one applied automatically based on specific technical criteria.
+ * a version whose last field is followed by a '+' (and additional
+      optional characters) indicates that the document is in draft form
+      and that its final version, to be applied at publication time, has not
+      yet been set. The version to the left of the '+' is that of the
+      previously published version from which the draft is derived.
+
+ This class provides support for the above conventions for incrementing the version.
+    If simple comparison is all that is needed, then the lighter-weight :py:class:`Version`
+ should be used instead.
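+
+    For example::
+
+        v = OARVersion("1.2.0+ (in edit)")   # a draft derived from published version 1.2.0
+        v.is_draft()                         # True
+        str(v.minor_incr())                  # '1.3.0' -- the draft suffix is dropped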
+ """
+
+ def __init__(self, vers):
+ self._sfx = ''
+ m = _numeric_ver.match(vers)
+ if m:
+ self._sfx = vers[m.end():]
+ vers = vers[:m.end()]
+ super(OARVersion, self).__init__(vers)
+
+ @property
+ def suffix(self):
+ return self._sfx
+
+ def is_draft(self):
+ """
+        return True if this version represents a draft (i.e. its suffix begins with '+')
+ """
+ return False if not self._sfx else self._sfx[0] == '+'
+
+ def __eq__(self, other):
+ if not isinstance(other, OARVersion):
+ other = OARVersion(str(other))
+
+ if self.suffix != other.suffix:
+ return False
+ return super().__eq__(other)
+
+ def __lt__(self, other):
+ if not isinstance(other, OARVersion):
+ other = OARVersion(str(other))
+
+ if self.fields == other.fields:
+ if other.suffix and self.is_draft() and not other.is_draft():
+ return True
+ elif self.suffix and not self.is_draft() and other.is_draft():
+ return False
+ return self.suffix < other.suffix
+
+ return self.fields < other.fields
+
+ def _set(self, *flds, suffix=''):
+ vers = '.'.join([str(f) for f in flds]) + suffix
+ new = OARVersion(vers)
+ self._vs = new._vs
+ self.fields = new.fields
+ self._sfx = new.suffix
+
+ def __str__(self):
+ return self._vs + self._sfx
+
+ def increment_field(self, pos: int) -> OARVersion:
+ """
+ increment the field of the version at a given position. If this version has a suffix
+ indicative of a draft (see :py:meth:`is_draft`), the suffix will be dropped.
+ :param int pos: the 0-origin position of the field to increment. The value can be negative to
+ change field positions relative to the end (i.e. -1 increments the last field).
+ If the ``pos`` indicates a position beyond the current number of fields, the
+ version fields will be padded with zeros up to that position before incrementing.
+ :returns: self, allowing for chaining with, say, :py:meth:`drop_suffix`.
+ :rtype: OARVersion
+        :raises IndexError: if ``pos`` is negative and refers to a position before the first field
+ """
+ if pos >= len(self.fields):
+ for i in range(len(self.fields), pos+1):
+ self.fields.append(0)
+ elif pos < -1*len(self.fields):
+ raise IndexError(pos)
+
+        self.fields[pos] += 1
+        if pos < 0:     # normalize so that only the fields after pos get zeroed
+            pos += len(self.fields)
+        for i in range(pos+1, len(self.fields)):
+            self.fields[i] = 0
+ self._vs = '.'.join([str(f) for f in self.fields])
+ if self.is_draft():
+ self.drop_suffix()
+
+ return self
+
+ def trivial_incr(self) -> OARVersion:
+ """
+        increment the third field of the version, representing a metadata or other trivial change
+        in the document it is assigned to.
+ :returns: self, allowing for chaining with, say, :py:meth:`drop_suffix`.
+ :rtype: OARVersion
+ """
+        return self.increment_field(2)
+
+ def minor_incr(self) -> OARVersion:
+ """
+        increment the second field of the version, representing a data or other minor change
+        in the document it is assigned to.
+ :returns: self, allowing for chaining with, say, :py:meth:`drop_suffix`.
+ :rtype: OARVersion
+ """
+ return self.increment_field(1)
+
+ def major_incr(self) -> OARVersion:
+ """
+        increment the first field of the version, representing a major change
+        in the document it is assigned to.
+ :returns: self, allowing for chaining with, say, :py:meth:`drop_suffix`.
+ :rtype: OARVersion
+ """
+ return self.increment_field(0)
+
+ def drop_suffix(self) -> OARVersion:
+ """
+ remove the suffix from this version
+        :returns: self, allowing for chaining with, say, :py:meth:`increment_field`.
+ :rtype: OARVersion
+ """
+ self._sfx = ''
+ return self
+
+def cmp_oar_versions(ver1: str, ver2: str) -> int:
+ """
+ compare two version strings for their order using the OAR document version conventions
+ :return int: -1 if v1 < v2, 0 if v1 = v2, and +1 if v1 > v2
+ """
+ a = OARVersion(ver1)
+ b = OARVersion(ver2)
+ if a < b:
+ return -1
+ elif a == b:
+ return 0
+ return +1
+
+
diff --git a/python/nistoar/nerdm/utils.py b/python/nistoar/nerdm/utils.py
index c387615..deeae27 100644
--- a/python/nistoar/nerdm/utils.py
+++ b/python/nistoar/nerdm/utils.py
@@ -2,6 +2,11 @@
Utility functions and classes for interrogating and manipulating NERDm metadata objects
"""
import re
+import jsonpath_ng as jp
+from collections.abc import Mapping, Sequence
+from typing import Union, List
+
+from nistoar.base.config import hget
META_PREFIXES = "_$"
@@ -181,3 +186,82 @@ def cmp_versions(ver1, ver2):
return 0
return +1
+_doc_properties = "title description asOntology notes comments valueDocumentation equivalentTo".split()
+def declutter_schema(schema: Mapping, post2020: bool=False):
+ """
+    remove documentation nodes (e.g. ``title``, ``description``, ``notes``) from a JSON Schema
+    object in situ
+    :param dict schema:    the JSON Schema object to declutter
+    :param bool post2020:  if True, assume type definitions appear under ``$defs`` (as in
+                           post-2020 drafts of JSON Schema) rather than ``definitions``
+ """
+ for prop in _doc_properties:
+ if prop in schema:
+ del schema[prop]
+
+ if "properties" in schema:
+ for prop in schema['properties']:
+ declutter_schema(schema['properties'][prop])
+
+ deftag = "definitions" if not post2020 else "$defs"
+ if deftag in schema:
+ for defname in schema[deftag]:
+ declutter_schema(schema[deftag][defname])
+
+ for seq in "allOf anyOf oneOf".split():
+ if seq in schema:
+ for itm in schema[seq]:
+ declutter_schema(itm)
+
+
+def unrequire_props_in(schema: Mapping, locations: Union[str, List[str]], post2020: bool=False):
+ """
+ remove ``"required"`` fields at the specified locations from within the given JSON Schema.
+
+ The provided locations should point to schema definitions within the given schema dictionary.
+ This function will remove the ``"required"`` property within the located schema (if it exists)
+ as well as any found within ``"allOf"``, ``"anyOf"``, or ``"oneOf"`` properties.
+
+ :param dict schema: a dictionary representing a JSON Schema
+    :param str|list locations:  period-delimited (JSONPath) paths to the internal schemas that
+                                contain a ``required`` property. An example might be
+                                "definitions.Resource"; a value of "$" refers to the top-level
+                                schema itself.
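+
+    For example::
+
+        unrequire_props_in(schema, ["definitions.Resource", "definitions.Topic"])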
+ """
+ if isinstance(locations, str):
+ locations = [ locations ]
+
+ for loc in locations:
+ subsch = hget(schema, loc)
+ if subsch and isinstance(subsch, Mapping):
+ if "required" in subsch:
+ del subsch["required"]
+ for seq in "allOf anyOf oneOf".split():
+ if seq in subsch and isinstance(subsch[seq], Sequence):
+ for itm in subsch[seq]:
+ unrequire_props_in(itm, "$", post2020)
+
+
+def loosen_schema(schema: Mapping, directives: Mapping, opts=None):
+ """
+    apply the given loosening directives to the given JSON Schema. The directives argument is a
+    dictionary that describes what to do; the following properties (the directives) are supported:
+
+ ``derequire``
+ a list of type definitions within the schema from which the required property
+ should be removed (via :py:func:`~nistoar.nerdm.utils.unrequire_props_in`). Each
+ type name listed will be assumed to be an item under the "definitions" node in the
+ schema this directive is applied to.
+ ``dedocument``
+ a boolean indicating whether the documentation annotations should be removed from
+        the schema. If not set, the default is determined by ``opts.dedoc`` if ``opts`` is
+        given, or True otherwise.
+
+ :param dict schema: the schema document as a JSON Schema schema dictionary
+ :param dict directives: the dictionary of directives to apply
+    :param opts: an options object (containing a script's command-line options)
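+
+    For example::
+
+        loosen_schema(schema, {"derequire": ["Resource"], "dedocument": True})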
+ """
+ if directives.get("dedocument", True):
+ declutter_schema(schema)
+
+ p2020 = directives.get("post2020")
+ deftag = "$defs" if p2020 else "definitions"
+
+ dereqtps = [ deftag+'.'+t for t in directives.get("derequire", []) ]
+ unrequire_props_in(schema, dereqtps, p2020)
+
diff --git a/python/tests/nistoar/base/test_config.py b/python/tests/nistoar/base/test_config.py
index 4fd4d39..86d12eb 100644
--- a/python/tests/nistoar/base/test_config.py
+++ b/python/tests/nistoar/base/test_config.py
@@ -1,10 +1,14 @@
import os, sys, pdb, shutil, logging, json, re, importlib
import unittest as test
+from pathlib import Path
from nistoar.testing import *
import nistoar.base.config as config
-datadir = os.path.join(os.path.dirname(__file__), "data")
+testdir = Path(__file__).resolve().parents[0]
+datadir = str(testdir / "data")
+basedir = testdir.parents[3]
+schemadir = basedir / 'model'
tmpd = None
def setUpModule():
@@ -71,6 +75,29 @@ def test_merge_config(self):
self.assertEqual(out['zub'], 'dub')
self.assertEqual(out['tell'], {"a": 1})
+ def test_hget_jp(self):
+ with open(schemadir/'nerdm-schema.json') as fd:
+ schema = json.load(fd)
+
+ self.assertEqual(config.hget_jp(schema, "definitions.Resource.properties.title.title"), "Title")
+ self.assertEqual(config.hget_jp(schema, "definitions.ResourceReference.allOf[1].required"), ["title"])
+
+ self.assertIsNone(config.hget_jp(schema, "definitions.goober.title"))
+ self.assertEqual(config.hget_jp(schema, "definitions.goober.title", "Dr."), "Dr.")
+ with self.assertRaises(KeyError):
+ config.hget_jp(schema, "definitions.goober.title", config.RAISE)
+
+ with self.assertRaises(KeyError):
+ config.hget_jp(schema, "definitions.ResourceReference.allOf[23].required", config.RAISE)
+
+ # make sure results are not copies of the original
+ ressch = config.hget_jp(schema, "definitions.Resource")
+ self.assertIn("required", ressch)
+ del ressch['required']
+ with self.assertRaises(KeyError):
+ config.hget_jp(schema, "definitions.Resource.required", config.RAISE)
+
+
class TestLogConfig(test.TestCase):
def resetLogfile(self):
diff --git a/python/tests/nistoar/id/test_versions.py b/python/tests/nistoar/id/test_versions.py
new file mode 100644
index 0000000..43e9c04
--- /dev/null
+++ b/python/tests/nistoar/id/test_versions.py
@@ -0,0 +1,172 @@
+import os, sys, pdb, shutil, logging, json
+import unittest as test
+
+import nistoar.id.versions as util
+
+class TestVersion(test.TestCase):
+
+ def test_ctor(self):
+ ver = util.Version("3.3.5.0")
+ self.assertEqual(ver._vs, "3.3.5.0")
+ self.assertEqual(ver.fields, [3,3,5,0])
+
+ def testEQ(self):
+ ver = util.Version("3.3.0")
+ self.assertEqual(ver, util.Version("3.3.0"))
+ self.assertTrue(ver == "3.3.0")
+ self.assertFalse(ver == "3.3.1")
+ self.assertFalse(ver == "1.3")
+
+ def testNE(self):
+ ver = util.Version("3.3.0")
+ self.assertNotEqual(ver, util.Version("3.3.2"))
+ self.assertFalse(ver != "3.3.0")
+ self.assertTrue(ver != "3.3.1")
+ self.assertTrue(ver != "1.3")
+
+ def testGE(self):
+ ver = util.Version("3.3.0")
+ self.assertTrue(ver >= "3.2.0")
+ self.assertTrue(ver >= "3.3.0")
+ self.assertTrue(ver >= "1.3")
+
+ self.assertFalse(ver >= "5.3")
+ self.assertFalse(ver >= util.Version("5.3"))
+
+ def testGT(self):
+ ver = util.Version("3.3.0")
+ self.assertTrue(ver > "3.2.0")
+ self.assertTrue(ver > "1.3")
+
+ self.assertFalse(ver > "3.3.0")
+ self.assertFalse(ver >= "5.3")
+ self.assertFalse(ver >= util.Version("5.3"))
+
+ def testLE(self):
+ ver = util.Version("3.3.0")
+ self.assertTrue(ver <= "3.5.0")
+ self.assertTrue(ver <= "3.3.1")
+ self.assertTrue(ver <= "3.3.0")
+ self.assertTrue(ver <= "5.3")
+
+ self.assertFalse(ver <= "1.3")
+ self.assertFalse(ver <= util.Version("2.3"))
+
+ def testLT(self):
+ ver = util.Version("3.3.0")
+ self.assertTrue(ver < "3.5.0")
+ self.assertTrue(ver < "3.3.1")
+ self.assertTrue(ver < "5.3")
+
+ self.assertFalse(ver < "3.3.0")
+ self.assertFalse(ver < "1.3")
+ self.assertFalse(ver < util.Version("2.3"))
+
+ def testIsProper(self):
+ self.assertTrue(util.Version.is_proper_version("33"))
+ self.assertTrue(util.Version.is_proper_version("3.3"))
+ self.assertTrue(util.Version.is_proper_version("13_3_0"))
+ self.assertTrue(util.Version.is_proper_version("1.23_400.10"))
+
+ self.assertFalse(util.Version.is_proper_version("-33"))
+ self.assertFalse(util.Version.is_proper_version("3.3r23"))
+ self.assertFalse(util.Version.is_proper_version("13.3.0-1"))
+ self.assertFalse(util.Version.is_proper_version("dev"))
+
+ def test_sorted(self):
+ vers = "2.0.1 3.0 0.1.1 0 12.3 2.0.1.0".split()
+ expect = "0 0.1.1 2.0.1 2.0.1.0 3.0 12.3".split()
+ self.assertEqual(sorted(vers, key=util.Version), expect)
+
+ def test_cmp_versions(self):
+ self.assertEqual(util.cmp_versions("1.0.0", "1.0.2"), -1)
+ self.assertEqual(util.cmp_versions("1.0.1", "1.0.1"), 0)
+ self.assertEqual(util.cmp_versions("1.0.2", "1.0.1"), 1)
+ self.assertEqual(util.cmp_versions("1.0", "1.0.2"), -1)
+ self.assertEqual(util.cmp_versions("1.0.0", "1.0"), 1)
+ self.assertEqual(util.cmp_versions("1", "1.0"), -1)
+ self.assertEqual(util.cmp_versions("1.0.2", "1.1.0"), -1)
+ self.assertEqual(util.cmp_versions("1.2.1", "1.0.1"), 1)
+ self.assertEqual(util.cmp_versions("1.0.2", "4.0.1"), -1)
+ self.assertEqual(util.cmp_versions("12.0.2", "4.0.1"), 1)
+
+class TestOARVersion(test.TestCase):
+
+ def test_ctor(self):
+ ver = util.OARVersion("3.3.5.0")
+ self.assertEqual(ver._vs, "3.3.5.0")
+ self.assertEqual(ver.fields, [3,3,5,0])
+ self.assertEqual(ver.suffix, '')
+
+ ver = util.OARVersion("3.3.5.0.1goob30")
+ self.assertEqual(ver._vs, "3.3.5.0.1")
+ self.assertEqual(ver.fields, [3,3,5,0,1])
+ self.assertEqual(ver.suffix, 'goob30')
+
+ ver = util.OARVersion("3.3+what")
+ self.assertEqual(ver._vs, "3.3")
+ self.assertEqual(ver.fields, [3,3])
+ self.assertEqual(ver.suffix, '+what')
+
+ def testEQ(self):
+ ver = util.OARVersion("3.3.0rc1")
+ self.assertEqual(ver, util.Version("3.3.0rc1"))
+ self.assertTrue(ver == "3.3.0rc1")
+ self.assertFalse(ver == "3.3.0")
+ self.assertFalse(ver == "3.3.1")
+ self.assertFalse(ver == "1.3")
+
+ ver = util.OARVersion("3.3.0")
+ self.assertEqual(ver, util.Version("3.3.0"))
+ self.assertTrue(ver == "3.3.0")
+ self.assertFalse(ver == "3.3.1")
+ self.assertFalse(ver == "1.3")
+
+ def testLT(self):
+ ver = util.OARVersion("3.3.0")
+ self.assertTrue(ver < "3.5.0")
+ self.assertTrue(ver < "3.3.1")
+ self.assertTrue(ver < "3.3.1+")
+ self.assertTrue(ver < "5.3")
+
+ self.assertFalse(ver < "3.3.0")
+ self.assertFalse(ver < "1.3")
+ self.assertFalse(ver < util.Version("2.3"))
+
+ ver = util.OARVersion("3.3.0+ (in edit)")
+ self.assertFalse(ver < "3.3.0")
+ self.assertTrue(ver < "3.3.0#rc1")
+ self.assertTrue(ver < "3.3.0rc1")
+
+ self.assertFalse(ver < "3.3.0+")
+ self.assertFalse(ver < "3.3.0+ (in Edit)")
+
+ ver = util.OARVersion("3.3.0rc3")
+ self.assertFalse(ver < "3.3.0alpha3")
+
+
+ def test_cmp_oar_versions(self):
+ self.assertEqual(util.cmp_oar_versions("1.0.0", "1.0.2"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.1", "1.0.1"), 0)
+ self.assertEqual(util.cmp_oar_versions("1.0.2", "1.0.1"), 1)
+ self.assertEqual(util.cmp_oar_versions("1.0", "1.0.2"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.0", "1.0"), 1)
+ self.assertEqual(util.cmp_oar_versions("1", "1.0"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.2", "1.1.0"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.2.1", "1.0.1"), 1)
+ self.assertEqual(util.cmp_oar_versions("1.0.2", "4.0.1"), -1)
+ self.assertEqual(util.cmp_oar_versions("12.0.2", "4.0.1"), 1)
+
+ self.assertEqual(util.cmp_oar_versions("1.0.1", "1.0.1+draft"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.1+", "1.0.1"), 1)
+ self.assertEqual(util.cmp_oar_versions("1.0.1+", "1.0.1+"), 0)
+ self.assertEqual(util.cmp_oar_versions("1.0.12alpha", "1.0.12beta"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.12beta", "1.0.12alpha"), 1)
+ self.assertEqual(util.cmp_oar_versions("1.0.12beta", "1.0.12beta"), 0)
+ self.assertEqual(util.cmp_oar_versions("1.0.1beta", "1.0.12beta"), -1)
+ self.assertEqual(util.cmp_oar_versions("1.0.12+", "1.0.12beta"), -1)
+
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/python/tests/nistoar/nerdm/test_utils.py b/python/tests/nistoar/nerdm/test_utils.py
index 11fdab2..037aa52 100644
--- a/python/tests/nistoar/nerdm/test_utils.py
+++ b/python/tests/nistoar/nerdm/test_utils.py
@@ -1,9 +1,15 @@
import os, sys, pdb, shutil, logging, json
import unittest as test
+from pathlib import Path
+from collections import OrderedDict
from nistoar.nerdm import utils
from nistoar.nerdm import constants as const
+testdir = Path(__file__).resolve().parents[0]
+basedir = testdir.parents[3]
+schemadir = basedir / 'model'
+
class TestUtils(test.TestCase):
def test_meta_prop_ch(self):
@@ -100,6 +106,131 @@ def test_schema_version_cmp(self):
self.assertEqual(utils.cmp_versions(utils.get_nerdm_schema_version(data), "0.5"), 1)
self.assertEqual(utils.cmp_versions(utils.get_nerdm_schema_version(data), "2.5"), -1)
self.assertEqual(utils.cmp_versions(utils.get_nerdm_schema_version(data), "1.3"), 0)
+
+ def test_declutter_schema(self):
+ with open(schemadir/'nerdm-schema.json') as fd:
+ schema = json.load(fd)
+
+ self.assertTrue(utils.hget(schema, "title"))
+ self.assertTrue(utils.hget(schema, "description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.asOntology"))
+
+ utils.declutter_schema(schema)
+
+ self.assertFalse(utils.hget(schema, "title"))
+ self.assertFalse(utils.hget(schema, "description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.title"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.notes"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.properties.title.title"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.properties.title.notes"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.properties.title.description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.properties.title.asOntology"))
+
+ def test_declutter_schema_post2020(self):
+ with open(schemadir/'nerdm-schema.json') as fd:
+ schema = json.load(fd)
+
+ self.assertTrue(utils.hget(schema, "title"))
+ self.assertTrue(utils.hget(schema, "description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.asOntology"))
+
+ utils.declutter_schema(schema, True)
+
+        # the file is not post-2020 compliant, so only the top-level documentation will be removed
+ self.assertFalse(utils.hget(schema, "title"))
+ self.assertFalse(utils.hget(schema, "description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertFalse(utils.hget(schema, "definitions.Resource.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.title"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.notes"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.properties.title.asOntology"))
+
+ def test_unrequire_props_in(self):
+ with open(schemadir/'nerdm-schema.json') as fd:
+ schema = json.load(fd)
+
+ self.assertTrue(utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(utils.hget(schema, "definitions.ResourceReference.allOf[1].required"))
+ self.assertTrue(utils.hget(schema, "definitions.Topic.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+
+ utils.unrequire_props_in(schema, "definitions.Resource")
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(utils.hget(schema, "definitions.ResourceReference.allOf[1].required"))
+ self.assertTrue(utils.hget(schema, "definitions.Topic.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+
+ utils.unrequire_props_in(schema, ["definitions.ResourceReference"])
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(not utils.hget(schema, "definitions.ResourceReference.allOf[1].required"))
+ self.assertTrue(utils.hget(schema, "definitions.Topic.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+
+ utils.unrequire_props_in(schema, ["definitions.Resource",
+ "definitions.Topic",
+ "goober",
+ "definitions.Organization"])
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(not utils.hget(schema, "definitions.ResourceReference.allOf[1].required"))
+ self.assertTrue(not utils.hget(schema, "definitions.Topic.required"))
+ self.assertTrue(not utils.hget(schema, "definitions.Organization.required"))
+
+ def test_loosen_schema(self):
+ with open(schemadir/"nerdm-schema.json") as fd:
+ schema = json.load(fd, object_pairs_hook=OrderedDict)
+
+ self.assertTrue(utils.hget(schema, "title"))
+ self.assertTrue(utils.hget(schema, "description"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.description"))
+
+ utils.loosen_schema(schema, {"derequire": ["Resource"], "dedocument": True})
+
+ self.assertTrue(not utils.hget(schema, "title"))
+ self.assertTrue(not utils.hget(schema, "description"))
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+ self.assertTrue(not utils.hget(schema, "definitions.Organization.description"))
+
+ def test_loosen_schema_no_dedoc(self):
+ with open(schemadir/"nerdm-schema.json") as fd:
+ schema = json.load(fd, object_pairs_hook=OrderedDict)
+
+ self.assertTrue(utils.hget(schema, "title"))
+ self.assertTrue(utils.hget(schema, "description"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.description"))
+
+ utils.loosen_schema(schema, {"derequire": ["Resource"], "dedocument": False})
+
+ self.assertTrue(utils.hget(schema, "title"))
+ self.assertTrue(utils.hget(schema, "description"))
+ self.assertTrue(not utils.hget(schema, "definitions.Resource.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Resource.description"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.required"))
+ self.assertTrue(utils.hget(schema, "definitions.Organization.description"))
+
+
class TestVersion(test.TestCase):
diff --git a/scripts/_install-env.sh b/scripts/_install-env.sh
index cd4f99f..fb2536c 100644
--- a/scripts/_install-env.sh
+++ b/scripts/_install-env.sh
@@ -41,10 +41,10 @@ while [ "$1" != "" ]; do
done
[ "$INSTALL_DIR" = "/usr/local" ] && {
- true ${PY_LIBDIR:=$INSTALL_DIR/lib/python2.7/dist-packages}
+ true ${PY_LIBDIR:=$INSTALL_DIR/lib/python3.8/dist-packages}
}
[ "$INSTALL_DIR" = "/usr" ] && {
- true ${PY_LIBDIR:=$INSTALL_DIR/lib/python2.7}
+ true ${PY_LIBDIR:=$INSTALL_DIR/lib/python3.8}
}
true ${ETC_DIR:=$INSTALL_DIR/etc}
diff --git a/scripts/install_ca_certs.sh b/scripts/install_ca_certs.sh
new file mode 100755
index 0000000..c759a6d
--- /dev/null
+++ b/scripts/install_ca_certs.sh
@@ -0,0 +1,37 @@
+#! /bin/bash
+#
+# install_ca_certs.sh -- copy the specified CA certificates into this source so that they can be used
+# to build the software via docker.
+#
+# usage: install_ca_certs.sh CA_CERT_FILE...
+#
+# where CA_CERT_FILE is a file path to a CA certificate to install
+#
+# This script helps address the problem with docker-based builds when run within a firewall that
+# replaces external site certificates with ones signed by a non-standard CA, causing the retrieval
+# of software dependencies to fail. This script is used by oar-docker's localbuild script to receive
+# extra CA certificates that addresses such failures. Because localdeploy makes no assumptions about
+# how this source code repository builds using docker, this script encapsulates that knowledge on
+# behalf of localbuild.
+#
+# Note: if this repository does not require/support use of non-standard CA certificates, remove (or
+# rename) this script.
+#
+set -e
+prog=`basename $0`
+execdir=`dirname $0`
+[ "$execdir" = "" -o "$execdir" = "." ] && execdir=$PWD
+basedir=`dirname $execdir`
+
+cacertdir="$basedir/docker/cacerts"
+[ -d "$cacertdir" ] || exit 0 # I guess we don't need the certs
+
+crts=`echo $@ | sed -e 's/^ *//' -e 's/ *$//'`
+[ -n "$crts" ] || {
+ print "${prog}: Missing cert file argument"
+ false
+}
+
+echo '+' cp $crts $cacertdir
+cp $crts $cacertdir
+
diff --git a/scripts/makedist.nerdmdocs b/scripts/makedist.nerdmdocs
index 3c34540..49140e0 100755
--- a/scripts/makedist.nerdmdocs
+++ b/scripts/makedist.nerdmdocs
@@ -78,8 +78,8 @@ echo '+' PACKAGE_NAME=$PACKAGE_NAME
echo '+' version=$version
# build the components
+# set -x
installdir=$BUILD_DIR/docs
-set -x
mkdir -p $installdir
# export schema files
diff --git a/scripts/record_deps.py b/scripts/record_deps.py
index 6bccde7..46b4e14 100755
--- a/scripts/record_deps.py
+++ b/scripts/record_deps.py
@@ -12,7 +12,7 @@
# The default package name (oar-sdp) can be over-ridden by the environment
# variable PACKAGE_NAME
#
-import os, sys, json, re
+import os, sys, json, re, traceback as tb
from collections import OrderedDict
prog = os.path.basename(sys.argv[0])
@@ -80,17 +80,23 @@ def ejschemadep():
def jmergedep():
import jsonmerge
- eggre = re.compile(r'^jsonmerge-(.*)\.egg-info$')
+ eggre = re.compile(r'^jsonmerge-(.*)\.egg')
modfile = jsonmerge.__file__
libdir = os.path.dirname(os.path.dirname(modfile))
vers="(unknown)"
- try:
- egginfo = [d for d in os.listdir(libdir) if eggre.match(d)]
- if len(egginfo) > 0:
- m = eggre.match(egginfo[0])
- vers = m.group(1)
- except Exception as ex:
- tb.print_exc()
+ m = eggre.match(os.path.basename(libdir))
+ if m:
+ # zipped egg
+ vers = m.group(1)
+ else:
+ # it's the dist-packages dir; look for the egg-info file
+ try:
+ egginfo = [d for d in os.listdir(libdir) if eggre.match(d)]
+ if len(egginfo) > 0:
+ m = eggre.match(egginfo[0])
+ vers = m.group(1)
+ except Exception as ex:
+ tb.print_exc()
return OrderedDict([
("name", "jsonmerge"),
("version", vers)