diff --git a/.vscode/settings.json b/.vscode/settings.json
index 667b00c..2cd3a39 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,7 +5,7 @@
"python.linting.enabled": true,
"[python]": {
"editor.codeActionsOnSave": {
- "source.organizeImports": true
+ "source.organizeImports": "explicit"
}
},
"terminal.integrated.env.linux": {
diff --git a/adsingestp/parsers/arxiv.py b/adsingestp/parsers/dubcore.py
similarity index 75%
rename from adsingestp/parsers/arxiv.py
rename to adsingestp/parsers/dubcore.py
index 1ce06d1..130946b 100644
--- a/adsingestp/parsers/arxiv.py
+++ b/adsingestp/parsers/dubcore.py
@@ -13,18 +13,18 @@
logger = logging.getLogger(__name__)
-class MultiArxivParser(IngestBase):
+class MultiDublinCoreParser(IngestBase):
start_re = r"<record[^>]*>"
end_re = r"</record[^>]*>"
def parse(self, text, header=False):
"""
- Separate multi-record arXiv XML document into individual XML documents
+ Separate multi-record Dublin Core XML document into individual XML documents
:param text: string, input XML text from a multi-record XML document
:param header: boolean (default: False), set to True to preserve overall
document header/footer for each separate record's document
- :return: list, each item is the XML of a separate arXiv document
+ :return: list, each item is the XML of a separate Dublin Core document
"""
output_chunks = []
for chunk in self.get_chunks(text, self.start_re, self.end_re, head_foot=header):
@@ -33,15 +33,14 @@ def parse(self, text, header=False):
return output_chunks
-class ArxivParser(BaseBeautifulSoupParser):
- # Dublin Core parser for arXiv
+class DublinCoreParser(BaseBeautifulSoupParser):
+ # Generic Dublin Core parser
DUBCORE_SCHEMA = ["http://www.openarchives.org/OAI/2.0/oai_dc/"]
author_collaborations_params = {
"keywords": ["group", "team", "collaboration"],
"remove_the": False,
- "fix_arXiv_mixed_collaboration_string": True,
}
def __init__(self):
@@ -50,24 +49,14 @@ def __init__(self):
self.input_metadata = None
def _parse_ids(self):
- if self.input_header.find("identifier"):
- ids = self.input_header.find("identifier").get_text()
- id_array = ids.split(":")
- arxiv_id = id_array[-1]
+ self.base_metadata["ids"] = {}
+ self.base_metadata["ids"]["pub-id"] = []
- # TODO what should the key on this actually be?
- self.base_metadata["publication"] = "eprint arXiv:" + arxiv_id
-
- self.base_metadata["ids"] = {"preprint": {}}
-
- self.base_metadata["ids"]["preprint"]["source"] = "arXiv"
- self.base_metadata["ids"]["preprint"]["id"] = arxiv_id
-
- dc_ids = self.input_metadata.find_all("dc:identifier")
- for d in dc_ids:
- d_text = d.get_text()
- if "doi:" in d_text:
- self.base_metadata["ids"]["doi"] = d_text.replace("doi:", "")
+ if self.input_metadata.find("dc:identifier"):
+ for dc_id in self.input_metadata.find_all("dc:identifier"):
+ self.base_metadata["ids"]["pub-id"].append(
+ {"attribute": "publisher-id", "Identifier": dc_id.get_text()}
+ )
def _parse_title(self):
title_array = self.input_metadata.find_all("dc:title")
@@ -104,16 +93,21 @@ def _parse_pubdate(self):
"dc:date"
).get_text()
+ def _parse_publisher(self):
+ if self.input_metadata.find("dc:publisher"):
+ self.base_metadata["publisher"] = self.input_metadata.find("dc:publisher").get_text()
+
def _parse_abstract(self):
desc_array = self.input_metadata.find_all("dc:description")
- # for arXiv.org, only 'dc:description'[0] is the abstract, the rest are comments
+ # assume the first 'dc:description' is the abstract; treat any remaining ones as comments
if desc_array:
self.base_metadata["abstract"] = self._clean_output(desc_array.pop(0).get_text())
if desc_array:
comments_out = []
for d in desc_array:
- comments_out.append({"origin": "arxiv", "text": self._clean_output(d.get_text())})
+ # TODO: decide what "origin" should be for comments from generic Dublin Core sources
+ comments_out.append({"text": self._clean_output(d.get_text())})
self.base_metadata["comments"] = comments_out
@@ -123,12 +117,12 @@ def _parse_keywords(self):
if keywords_array:
keywords_out = []
for k in keywords_array:
- keywords_out.append({"system": "arxiv", "string": k.get_text()})
+ keywords_out.append({"string": k.get_text()})
self.base_metadata["keywords"] = keywords_out
def parse(self, text):
"""
- Parse arXiv XML into standard JSON format
+ Parse Dublin Core XML into standard JSON format
:param text: string, contents of XML file
:return: parsed file contents in JSON format
"""
@@ -154,6 +148,7 @@ def parse(self, text):
self._parse_pubdate()
self._parse_abstract()
self._parse_keywords()
+ self._parse_publisher()
self.base_metadata = self._entity_convert(self.base_metadata)
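A minimal usage sketch of the renamed module for reviewers (illustrative only: the stub path comes from the new test data below, and the return shapes follow the docstrings above rather than a guaranteed API):

    from adsingestp.parsers import dubcore

    # Read one of the new Dublin Core stub records (a single OAI-PMH record).
    with open("tests/stubdata/input/dubcore_pos_ecrs_002.xml", "r") as fp:
        data = fp.read()

    # MultiDublinCoreParser splits a multi-record document into per-record XML
    # strings; a single-record file should come back as a one-element list.
    chunks = dubcore.MultiDublinCoreParser().parse(data)

    # DublinCoreParser turns each record into the standard metadata structure
    # (ids, title, authors, pubdate, abstract, keywords, and now publisher).
    parser = dubcore.DublinCoreParser()
    records = [parser.parse(chunk) for chunk in chunks]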
diff --git a/tests/stubdata/input/dubcore_pos_ecrs_002.xml b/tests/stubdata/input/dubcore_pos_ecrs_002.xml
new file mode 100644
index 0000000..1d3bac4
--- /dev/null
+++ b/tests/stubdata/input/dubcore_pos_ecrs_002.xml
@@ -0,0 +1,33 @@
+<record>
+    <header>
+        <identifier>oai:pos.sissa.it:ECRS/002</identifier>
+        <datestamp>2023-02-15</datestamp>
+        <setSpec>conference:ECRS</setSpec>
+        <setSpec>group:14</setSpec>
+    </header>
+    <metadata>
+        <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
+                   xmlns:dc="http://purl.org/dc/elements/1.1/"
+                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                   xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
+            <dc:title>The Memories of the First European Cosmic Ray Symposium: Łódź 1968</dc:title>
+            <dc:creator>Alan Watson</dc:creator>
+            <dc:subject>Astroparticle Physics</dc:subject>
+            <dc:description>The origins of the series of European Cosmic-Ray Symposia are briefly described. The first
+                meeting in the seri
+                es, on ‘Hadronic Interactions and Extensive Air Showers’, held in Łódź, Poland in 1968, was attended by
+                the author: some memories are recounted.</dc:description>
+            <dc:publisher>Sissa Medialab</dc:publisher>
+            <dc:date>2023-02-15</dc:date>
+            <dc:type>Text</dc:type>
+            <dc:format>application/pdf</dc:format>
+            <dc:identifier>PoS(ECRS)002</dc:identifier>
+            <dc:identifier>10.22323/1.423.0002</dc:identifier>
+            <dc:identifier>https://pos.sissa.it/423/002/</dc:identifier>
+            <dc:language>en</dc:language>
+            <dc:relation>ECRS (27th European Cosmic Ray Symposium) Opening; isPartOf</dc:relation>
+            <dc:rights>Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND
+                4.0)</dc:rights>
+        </oai_dc:dc>
+    </metadata>
+</record>
diff --git a/tests/stubdata/output/arxiv_0901_2443.json b/tests/stubdata/output/arxiv_0901_2443.json
index c9d1df5..b1a2700 100644
--- a/tests/stubdata/output/arxiv_0901_2443.json
+++ b/tests/stubdata/output/arxiv_0901_2443.json
@@ -1,17 +1,104 @@
-{"abstract": {"textEnglish": "The $^{112,120}$Sn$(\\gamma,\\gamma')$ reactions have been studied at the S-DALINAC. Electric dipole (E1) strength distributions have been determined including contributions from unresolved strength extracted by a fluctuation analysis. Together with available data on $^{116,124}$Sn, an experimental systematics of the pygmy dipole resonance (PDR) in stable even-mass tin isotopes is established. The PDR centroid excitation energies and summed strengths are in reasonable agreement with quasiparticle-phonon model calculations based on a nonrelativistic description of the mean field but disagree with relativistic quasiparticle random-phase approximation predictions."},
- "authors": [{"name": {"given_name": "B.", "pubraw": "Özel, B.", "surname": "Özel"}},
- {"name": {"given_name": "J.", "pubraw": "Enders, J.", "surname": "Enders"}},
- {"name": {"given_name": "H.", "pubraw": "Lenske, H.", "surname": "Lenske"}},
- {"name": {"given_name": "P.", "pubraw": "von Neumann-Cosel, P.", "surname": "von Neumann-Cosel"}},
- {"name": {"given_name": "I.", "pubraw": "Poltoratska, I.", "surname": "Poltoratska"}},
- {"name": {"given_name": "V.", "middle_name": "Yu.", "pubraw": "Ponomarev, V. Yu.", "surname": "Ponomarev"}},
- {"name": {"given_name": "A.", "pubraw": "Richter, A.", "surname": "Richter"}},
- {"name": {"given_name": "D.", "pubraw": "Savran, D.", "surname": "Savran"}},
- {"name": {"given_name": "N.", "pubraw": "Tsoneva, N.", "surname": "Tsoneva"}}],
- "comments": [{"commentOrigin": "arxiv", "commentText": "Comment: submitted to Phys. Lett. B"}],
- "keywords": [{"keyString": "Nuclear Experiment", "keySystem": "arxiv"}],
- "persistentIDs": [{"preprint": {"identifier": "0901.2443", "source": "arXiv"}}],
- "pubDate": {"electrDate": "2009-01-16"},
- "publication": {"pubName": "eprint arXiv:0901.2443", "pubYear": "2009"},
- "recordData": {"createdTime": "", "loadFormat": "OtherXML", "loadLocation": "", "loadType": "fromFile", "parsedTime": "", "recordOrigin": ""},
- "title": {"textEnglish": "Excitation energy and strength of the pygmy dipole resonance in stable tin isotopes"}}
+{
+ "abstract": {
+ "textEnglish": "The $^{112,120}$Sn$(\\gamma,\\gamma')$ reactions have been studied at the S-DALINAC. Electric dipole (E1) strength distributions have been determined including contributions from unresolved strength extracted by a fluctuation analysis. Together with available data on $^{116,124}$Sn, an experimental systematics of the pygmy dipole resonance (PDR) in stable even-mass tin isotopes is established. The PDR centroid excitation energies and summed strengths are in reasonable agreement with quasiparticle-phonon model calculations based on a nonrelativistic description of the mean field but disagree with relativistic quasiparticle random-phase approximation predictions."
+ },
+ "authors": [
+ {
+ "name": {
+ "given_name": "B.",
+ "pubraw": "\u00d6zel, B.",
+ "surname": "\u00d6zel"
+ }
+ },
+ {
+ "name": {
+ "given_name": "J.",
+ "pubraw": "Enders, J.",
+ "surname": "Enders"
+ }
+ },
+ {
+ "name": {
+ "given_name": "H.",
+ "pubraw": "Lenske, H.",
+ "surname": "Lenske"
+ }
+ },
+ {
+ "name": {
+ "given_name": "P.",
+ "pubraw": "von Neumann-Cosel, P.",
+ "surname": "von Neumann-Cosel"
+ }
+ },
+ {
+ "name": {
+ "given_name": "I.",
+ "pubraw": "Poltoratska, I.",
+ "surname": "Poltoratska"
+ }
+ },
+ {
+ "name": {
+ "given_name": "V.",
+ "middle_name": "Yu.",
+ "pubraw": "Ponomarev, V. Yu.",
+ "surname": "Ponomarev"
+ }
+ },
+ {
+ "name": {
+ "given_name": "A.",
+ "pubraw": "Richter, A.",
+ "surname": "Richter"
+ }
+ },
+ {
+ "name": {
+ "given_name": "D.",
+ "pubraw": "Savran, D.",
+ "surname": "Savran"
+ }
+ },
+ {
+ "name": {
+ "given_name": "N.",
+ "pubraw": "Tsoneva, N.",
+ "surname": "Tsoneva"
+ }
+ }
+ ],
+ "comments": [
+ {
+ "commentText": "Comment: submitted to Phys. Lett. B"
+ }
+ ],
+ "keywords": [
+ {
+ "keyString": "Nuclear Experiment"
+ }
+ ],
+ "pubDate": {
+ "electrDate": "2009-01-16"
+ },
+ "publication": {
+ "pubYear": "2009"
+ },
+ "publisherIDs": [
+ {
+ "Identifier": "http://arxiv.org/abs/0901.2443",
+ "attribute": "publisher-id"
+ }
+ ],
+ "recordData": {
+ "createdTime": "",
+ "loadFormat": "OtherXML",
+ "loadLocation": "",
+ "loadType": "fromFile",
+ "parsedTime": "",
+ "recordOrigin": ""
+ },
+ "title": {
+ "textEnglish": "Excitation energy and strength of the pygmy dipole resonance in stable tin isotopes"
+ }
+}
diff --git a/tests/stubdata/output/arxiv_1711_04702.json b/tests/stubdata/output/arxiv_1711_04702.json
index c5e5f48..28b46d6 100644
--- a/tests/stubdata/output/arxiv_1711_04702.json
+++ b/tests/stubdata/output/arxiv_1711_04702.json
@@ -1,13 +1,76 @@
-{"abstract": {"textEnglish": "Background: Gene co-expression network analyses have become a central approach for the systems-level analysis of biological data. Several software packages exist for generating and analyzing such networks, either from correlation scores or the absolute value of a transformed score called weighted topological overlap (wTO). However, since some genes are able to up- or down-regulate other genes, it is important to explicitly consider both positive and negative correlations when constructing a gene co-expression network. Additionally, there has been a growing interest in the systematic comparison of multiple networks to identify deferentially changed links. Typically, such analyses are focused on the comparison of networks or data from two different conditions. Results: Here, we present an R package for calculating the weighted topological overlap (wTO), that explicitly addresses the sign of wTO values. The package includes the calculation of p-values (raw and adjusted) for each pairwise gene score. Our package also allows the calculation of networks from time series, without replicates. Additionally, our R package incorporates a novel method for calculating a consensus network (CN) from two or more networks. To visualize the resulting networks, the R package contains a visualization tool which allows for the direct network manipulation and access of node and link information. When testing the package on a standard laptop computer, we can conduct all calculations for systems of 20,000 genes in under two hours. Conclusion: In this work, we developed an R package that allows the computation of wTO networks, CNs and a visualization tool in the R statistical environment. It is publicly available on CRAN repositories under the GPL-2 Open Source License (https://cran.r-project.org/web/packages/wTO/)."},
- "authors": [{"name": {"given_name": "Deisy", "pubraw": "Gysi, Deisy Morselli", "surname": "Morselli Gysi"}},
- {"name": {"given_name": "Andre", "pubraw": "Voigt, Andre", "surname": "Voigt"}},
- {"name": {"given_name": "Tiago", "pubraw": "Fragoso, Tiago de Miranda", "middle_name": "de Miranda", "surname": "Fragoso"}},
- {"name": {"given_name": "Eivind", "pubraw": "Almaas, Eivind", "surname": "Almaas"}},
- {"name": {"given_name": "Katja", "pubraw": "Nowick, Katja", "surname": "Nowick"}}],
- "comments": [{"commentOrigin": "arxiv", "commentText": "Comment: 13 pages, 3 Figures"}],
- "keywords": [{"keyString": "Quantitative Biology - Molecular Networks", "keySystem": "arxiv"}],
- "persistentIDs": [{"preprint": {"identifier": "1711.04702", "source": "arXiv"}}],
- "pubDate": {"electrDate": "2017-11-13"},
- "publication": {"pubName": "eprint arXiv:1711.04702", "pubYear": "2017"},
- "recordData": {"createdTime": "", "loadFormat": "OtherXML", "loadLocation": "", "loadType": "fromFile", "parsedTime": "", "recordOrigin": ""},
- "title": {"textEnglish": "wTO: an R package for computing weighted topological overlap and consensus networks with an integrated visualization tool"}}
+{
+ "abstract": {
+ "textEnglish": "Background: Gene co-expression network analyses have become a central approach for the systems-level analysis of biological data. Several software packages exist for generating and analyzing such networks, either from correlation scores or the absolute value of a transformed score called weighted topological overlap (wTO). However, since some genes are able to up- or down-regulate other genes, it is important to explicitly consider both positive and negative correlations when constructing a gene co-expression network. Additionally, there has been a growing interest in the systematic comparison of multiple networks to identify deferentially changed links. Typically, such analyses are focused on the comparison of networks or data from two different conditions. Results: Here, we present an R package for calculating the weighted topological overlap (wTO), that explicitly addresses the sign of wTO values. The package includes the calculation of p-values (raw and adjusted) for each pairwise gene score. Our package also allows the calculation of networks from time series, without replicates. Additionally, our R package incorporates a novel method for calculating a consensus network (CN) from two or more networks. To visualize the resulting networks, the R package contains a visualization tool which allows for the direct network manipulation and access of node and link information. When testing the package on a standard laptop computer, we can conduct all calculations for systems of 20,000 genes in under two hours. Conclusion: In this work, we developed an R package that allows the computation of wTO networks, CNs and a visualization tool in the R statistical environment. It is publicly available on CRAN repositories under the GPL-2 Open Source License (https://cran.r-project.org/web/packages/wTO/)."
+ },
+ "authors": [
+ {
+ "name": {
+ "given_name": "Deisy",
+ "pubraw": "Gysi, Deisy Morselli",
+ "surname": "Morselli Gysi"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Andre",
+ "pubraw": "Voigt, Andre",
+ "surname": "Voigt"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Tiago",
+ "middle_name": "de Miranda",
+ "pubraw": "Fragoso, Tiago de Miranda",
+ "surname": "Fragoso"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Eivind",
+ "pubraw": "Almaas, Eivind",
+ "surname": "Almaas"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Katja",
+ "pubraw": "Nowick, Katja",
+ "surname": "Nowick"
+ }
+ }
+ ],
+ "comments": [
+ {
+ "commentText": "Comment: 13 pages, 3 Figures"
+ }
+ ],
+ "keywords": [
+ {
+ "keyString": "Quantitative Biology - Molecular Networks"
+ }
+ ],
+ "pubDate": {
+ "electrDate": "2017-11-13"
+ },
+ "publication": {
+ "pubYear": "2017"
+ },
+ "publisherIDs": [
+ {
+ "Identifier": "http://arxiv.org/abs/1711.04702",
+ "attribute": "publisher-id"
+ }
+ ],
+ "recordData": {
+ "createdTime": "",
+ "loadFormat": "OtherXML",
+ "loadLocation": "",
+ "loadType": "fromFile",
+ "parsedTime": "",
+ "recordOrigin": ""
+ },
+ "title": {
+ "textEnglish": "wTO: an R package for computing weighted topological overlap and consensus networks with an integrated visualization tool"
+ }
+}
diff --git a/tests/stubdata/output/arxiv_1711_05739.json b/tests/stubdata/output/arxiv_1711_05739.json
index 2d45fa7..40ed568 100644
--- a/tests/stubdata/output/arxiv_1711_05739.json
+++ b/tests/stubdata/output/arxiv_1711_05739.json
@@ -1,9 +1,61 @@
-{"abstract": {"textEnglish": "We explore the occurrence and detectability of planet-planet occultations (PPOs) in exoplanet systems. These are events during which a planet occults the disk of another planet in the same system, imparting a small photometric signal as its thermal or reflected light is blocked. We focus on the planets in TRAPPIST-1, whose orbital planes we show are aligned to within 0.3 degrees at 90% confidence. We present a photodynamical model for predicting and computing PPOs in TRAPPIST-1 and other systems for various assumptions of the planets' atmospheric states. When marginalizing over the uncertainties on all orbital parameters, we find that the rate of PPOs in TRAPPIST-1 is about 1.4 per day. We investigate the prospects for detection of these events with the James Webb Space Telescope, finding that ~10-20 occultations per year of b and c should be above the noise level at 12-15 microns. Joint modeling of several of these PPOs could lead to a robust detection. Alternatively, observations with the proposed Origins Space Telescope should be able to detect individual PPOs at high signal-to-noise. We show how PPOs can be used to break transit timing variation degeneracies, imposing strong constraints on the eccentricities and masses of the planets, as well as to constrain the longitudes of nodes and thus the complete three-dimensional structure of the system. We further show how modeling of these events can be used to reveal a planet's day/night temperature contrast and construct crude surface maps. We make our photodynamical code available on github."},
- "authors": [{"name": {"given_name": "Rodrigo", "pubraw": "Luger, Rodrigo", "surname": "Luger"}}, {"name": {"given_name": "Jacob", "pubraw": "Lustig-Yaeger, Jacob", "surname": "Lustig-Yaeger"}}, {"name": {"given_name": "Eric", "pubraw": "Agol, Eric", "surname": "Agol"}}],
- "comments": [{"commentOrigin": "arxiv", "commentText": "Comment: 36 pages, 25 figures. Accepted to ApJ. Multi-purpose photodynamical code available at github.com/rodluger/planetplanet"}],
- "keywords": [{"keyString": "Astrophysics - Earth and Planetary Astrophysics", "keySystem": "arxiv"}],
- "persistentIDs": [{"preprint": {"identifier": "1711.05739", "source": "arXiv"}}],
- "pubDate": {"electrDate": "2017-11-15"},
- "publication": {"pubName": "eprint arXiv:1711.05739", "pubYear": "2017"},
- "recordData": {"createdTime": "", "loadFormat": "OtherXML", "loadLocation": "", "loadType": "fromFile", "parsedTime": "", "recordOrigin": ""},
- "title": {"textEnglish": "Planet-Planet Occultations in TRAPPIST-1 and Other Exoplanet Systems"}}
+{
+ "abstract": {
+ "textEnglish": "We explore the occurrence and detectability of planet-planet occultations (PPOs) in exoplanet systems. These are events during which a planet occults the disk of another planet in the same system, imparting a small photometric signal as its thermal or reflected light is blocked. We focus on the planets in TRAPPIST-1, whose orbital planes we show are aligned to within 0.3 degrees at 90% confidence. We present a photodynamical model for predicting and computing PPOs in TRAPPIST-1 and other systems for various assumptions of the planets' atmospheric states. When marginalizing over the uncertainties on all orbital parameters, we find that the rate of PPOs in TRAPPIST-1 is about 1.4 per day. We investigate the prospects for detection of these events with the James Webb Space Telescope, finding that ~10-20 occultations per year of b and c should be above the noise level at 12-15 microns. Joint modeling of several of these PPOs could lead to a robust detection. Alternatively, observations with the proposed Origins Space Telescope should be able to detect individual PPOs at high signal-to-noise. We show how PPOs can be used to break transit timing variation degeneracies, imposing strong constraints on the eccentricities and masses of the planets, as well as to constrain the longitudes of nodes and thus the complete three-dimensional structure of the system. We further show how modeling of these events can be used to reveal a planet's day/night temperature contrast and construct crude surface maps. We make our photodynamical code available on github."
+ },
+ "authors": [
+ {
+ "name": {
+ "given_name": "Rodrigo",
+ "pubraw": "Luger, Rodrigo",
+ "surname": "Luger"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Jacob",
+ "pubraw": "Lustig-Yaeger, Jacob",
+ "surname": "Lustig-Yaeger"
+ }
+ },
+ {
+ "name": {
+ "given_name": "Eric",
+ "pubraw": "Agol, Eric",
+ "surname": "Agol"
+ }
+ }
+ ],
+ "comments": [
+ {
+ "commentText": "Comment: 36 pages, 25 figures. Accepted to ApJ. Multi-purpose photodynamical code available at github.com/rodluger/planetplanet"
+ }
+ ],
+ "keywords": [
+ {
+ "keyString": "Astrophysics - Earth and Planetary Astrophysics"
+ }
+ ],
+ "pubDate": {
+ "electrDate": "2017-11-15"
+ },
+ "publication": {
+ "pubYear": "2017"
+ },
+ "publisherIDs": [
+ {
+ "Identifier": "http://arxiv.org/abs/1711.05739",
+ "attribute": "publisher-id"
+ }
+ ],
+ "recordData": {
+ "createdTime": "",
+ "loadFormat": "OtherXML",
+ "loadLocation": "",
+ "loadType": "fromFile",
+ "parsedTime": "",
+ "recordOrigin": ""
+ },
+ "title": {
+ "textEnglish": "Planet-Planet Occultations in TRAPPIST-1 and Other Exoplanet Systems"
+ }
+}
diff --git a/tests/stubdata/output/arxiv_math_0306266.json b/tests/stubdata/output/arxiv_math_0306266.json
index 4091d53..1a82f19 100644
--- a/tests/stubdata/output/arxiv_math_0306266.json
+++ b/tests/stubdata/output/arxiv_math_0306266.json
@@ -1,14 +1,53 @@
-{"abstract": {"textEnglish": "We study the Lovasz number theta along with two further SDP relaxations theta1, theta1/2 of the independence number and the corresponding relaxations of the chromatic number on random graphs G(n,p). We prove that these relaxations are concentrated about their means Moreover, extending a result of Juhasz, we compute the asymptotic value of the relaxations for essentially the entire range of edge probabilities p. As an application, we give an improved algorithm for approximating the independence number in polynomial expected time, thereby extending a result of Krivelevich and Vu. We also improve on the analysis of an algorithm of Krivelevich for deciding whether G(n,p) is k-colorable."},
- "authors": [{"name": {"given_name": "Amin", "pubraw": "Coja-Oghlan, Amin", "surname": "Coja-Oghlan"}}],
- "keywords": [{"keyString": "Mathematics - Combinatorics", "keySystem": "arxiv"},
- {"keyString": "05C80, 05C15", "keySystem": "arxiv"}],
- "persistentIDs": [{"DOI": "10.1017/S0963548305006826", "preprint": {"identifier": "math/0306266", "source": "arXiv"}}],
- "pubDate": {"electrDate": "2003-06-18"},
- "publication": {"pubName": "eprint arXiv:math/0306266", "pubYear": "2003"},
- "recordData": {"createdTime": "",
- "loadFormat": "OtherXML",
- "loadLocation": "",
- "loadType": "fromFile",
- "parsedTime": "",
- "recordOrigin": ""},
- "title": {"textEnglish": "The Lovasz number of random graphs"}}
+{
+ "abstract": {
+ "textEnglish": "We study the Lovasz number theta along with two further SDP relaxations theta1, theta1/2 of the independence number and the corresponding relaxations of the chromatic number on random graphs G(n,p). We prove that these relaxations are concentrated about their means Moreover, extending a result of Juhasz, we compute the asymptotic value of the relaxations for essentially the entire range of edge probabilities p. As an application, we give an improved algorithm for approximating the independence number in polynomial expected time, thereby extending a result of Krivelevich and Vu. We also improve on the analysis of an algorithm of Krivelevich for deciding whether G(n,p) is k-colorable."
+ },
+ "authors": [
+ {
+ "name": {
+ "given_name": "Amin",
+ "pubraw": "Coja-Oghlan, Amin",
+ "surname": "Coja-Oghlan"
+ }
+ }
+ ],
+ "keywords": [
+ {
+ "keyString": "Mathematics - Combinatorics"
+ },
+ {
+ "keyString": "05C80, 05C15"
+ }
+ ],
+ "pubDate": {
+ "electrDate": "2003-06-18"
+ },
+ "publication": {
+ "pubYear": "2003"
+ },
+ "publisherIDs": [
+ {
+ "Identifier": "http://arxiv.org/abs/math/0306266",
+ "attribute": "publisher-id"
+ },
+ {
+ "Identifier": "Combinatorics, Probability and Computing 14 (2005) 439 - 465",
+ "attribute": "publisher-id"
+ },
+ {
+ "Identifier": "doi:10.1017/S0963548305006826",
+ "attribute": "publisher-id"
+ }
+ ],
+ "recordData": {
+ "createdTime": "",
+ "loadFormat": "OtherXML",
+ "loadLocation": "",
+ "loadType": "fromFile",
+ "parsedTime": "",
+ "recordOrigin": ""
+ },
+ "title": {
+ "textEnglish": "The Lovasz number of random graphs"
+ }
+}
diff --git a/tests/stubdata/output/dubcore_pos_ecrs_002.json b/tests/stubdata/output/dubcore_pos_ecrs_002.json
new file mode 100644
index 0000000..af968be
--- /dev/null
+++ b/tests/stubdata/output/dubcore_pos_ecrs_002.json
@@ -0,0 +1,51 @@
+{
+ "abstract": {
+ "textEnglish": "The origins of the series of European Cosmic-Ray Symposia are briefly described. The first meeting in the seri es, on \u2018Hadronic Interactions and Extensive Air Showers\u2019, held in \u0141\u00f3d\u017a, Poland in 1968, was attended by the author: some memories are recounted."
+ },
+ "authors": [
+ {
+ "name": {
+ "given_name": "Alan",
+ "pubraw": "Alan Watson",
+ "surname": "Watson"
+ }
+ }
+ ],
+ "keywords": [
+ {
+ "keyString": "Astroparticle Physics"
+ }
+ ],
+ "pubDate": {
+ "electrDate": "2023-02-15"
+ },
+ "publication": {
+ "pubYear": "2023",
+ "publisher": "Sissa Medialab"
+ },
+ "publisherIDs": [
+ {
+ "Identifier": "PoS(ECRS)002",
+ "attribute": "publisher-id"
+ },
+ {
+ "Identifier": "10.22323/1.423.0002",
+ "attribute": "publisher-id"
+ },
+ {
+ "Identifier": "https://pos.sissa.it/423/002/",
+ "attribute": "publisher-id"
+ }
+ ],
+ "recordData": {
+ "createdTime": "",
+ "loadFormat": "OtherXML",
+ "loadLocation": "",
+ "loadType": "fromFile",
+ "parsedTime": "",
+ "recordOrigin": ""
+ },
+ "title": {
+ "textEnglish": "The Memories of the First European Cosmic Ray Symposium: \u0141\u00f3d\u017a 1968"
+ }
+}
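For reference, a hand-written sketch (not parser output) of the intermediate base_metadata fields that the new _parse_ids and _parse_publisher methods populate for this record, before the serializer maps them to the publisherIDs and publication blocks shown above:

    # Assumed intermediate shape, per the _parse_ids / _parse_publisher changes in dubcore.py.
    base_metadata_fragment = {
        "publisher": "Sissa Medialab",
        "ids": {
            "pub-id": [
                {"attribute": "publisher-id", "Identifier": "PoS(ECRS)002"},
                {"attribute": "publisher-id", "Identifier": "10.22323/1.423.0002"},
                {"attribute": "publisher-id", "Identifier": "https://pos.sissa.it/423/002/"},
            ]
        },
    }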
diff --git a/tests/test_arxiv.py b/tests/test_dublincore.py
similarity index 89%
rename from tests/test_arxiv.py
rename to tests/test_dublincore.py
index 313887c..59b1f81 100644
--- a/tests/test_arxiv.py
+++ b/tests/test_dublincore.py
@@ -5,19 +5,20 @@
from adsingestschema import ads_schema_validator
-from adsingestp.parsers import arxiv
+from adsingestp.parsers import dubcore
TIMESTAMP_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
-class TestArxiv(unittest.TestCase):
+class TestDublinCore(unittest.TestCase):
def setUp(self):
stubdata_dir = os.path.join(os.path.dirname(__file__), "stubdata/")
self.inputdir = os.path.join(stubdata_dir, "input")
self.outputdir = os.path.join(stubdata_dir, "output")
- def test_arxiv(self):
+ def test_dubcore(self):
filenames = [
+ "dubcore_pos_ecrs_002",
"arxiv_1711_05739",
"arxiv_0901_2443",
"arxiv_1711_04702",
@@ -26,7 +27,7 @@ def test_arxiv(self):
for f in filenames:
test_infile = os.path.join(self.inputdir, f + ".xml")
test_outfile = os.path.join(self.outputdir, f + ".json")
- parser = arxiv.ArxivParser()
+ parser = dubcore.DublinCoreParser()
with open(test_infile, "rb") as fp:
input_data = fp.read()
@@ -55,18 +56,18 @@ def test_arxiv(self):
self.assertEqual(parsed, output_data)
-class TextArxivMulti(unittest.TestCase):
+class TestDublinCoreMulti(unittest.TestCase):
def setUp(self):
stubdata_dir = os.path.join(os.path.dirname(__file__), "stubdata/")
self.inputdir = os.path.join(stubdata_dir, "input")
self.outputdir = os.path.join(stubdata_dir, "output")
- def test_arxiv_multi(self):
+ def test_dubcore_multi(self):
filenames = [
"arxiv_multi_20230125",
]
- parser = arxiv.MultiArxivParser()
+ parser = dubcore.MultiDublinCoreParser()
for f in filenames:
test_infile = os.path.join(self.inputdir, f + ".xml")
@@ -81,7 +82,6 @@ def test_arxiv_multi(self):
output_data_header = output_text.strip().split("\n\n")
parsed = parser.parse(input_data, header=True)
-
self.assertEqual(parsed, output_data_header)
with open(test_outfile_noheader, "r") as fp: