Upgrade TypeScript to 5.7.2 #586

Merged · 2 commits · Nov 27, 2024
8 changes: 4 additions & 4 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -27,7 +27,7 @@
"husky": "^9.1.7",
"lint-staged": "^15.2.10",
"type-detect": "^4.1.0",
"typescript": "^5.6.3",
"typescript": "^5.7.2",
"wsrun": "^5.2.4"
}
}
7 changes: 0 additions & 7 deletions packages/ckan/src/namespace.js

This file was deleted.
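
The deleted module is not shown here, but the rest of this PR replaces every `ns.*` reference with `rdf.ns.*`, so the same vocabulary terms now come from `@zazuko/env` directly. A minimal sketch of the new pattern (illustrative only, not code from this repository):

```js
import rdf from '@zazuko/env'

// @zazuko/env bundles common vocabulary namespaces under `rdf.ns`,
// so terms that previously came from the local namespace module
// can be built inline as RDF/JS named nodes:
const name = rdf.ns.schema.name        // http://schema.org/name
const creator = rdf.ns.dcterms.creator // http://purl.org/dc/terms/creator

console.log(name.value, creator.value)
```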

36 changes: 18 additions & 18 deletions packages/ckan/src/query.js
@@ -1,7 +1,7 @@
// @ts-check

import { sparql } from '@tpluscode/rdf-string'
- import * as ns from './namespace.js'
+ import rdf from '@zazuko/env'

/**
* Query to retrieve all datasets for a given organization.
@@ -14,48 +14,48 @@ const datasetsQuery = (organizationId) => {
CONSTRUCT {
?dataset ?p ?o .
?o ?nestedP ?nestedO .
- ?copyright ${ns.schema.identifier} ?copyrightIdentifier .
- ?dataset ${ns.dcterms.accrualPeriodicity} ?accrualPeriodicity .
- ?publisher ${ns.schema.name} ?publisherName .
- ?dataset ${ns.dcat.theme} ?euTheme .
+ ?copyright ${rdf.ns.schema.identifier} ?copyrightIdentifier .
+ ?dataset ${rdf.ns.dcterms.accrualPeriodicity} ?accrualPeriodicity .
+ ?publisher ${rdf.ns.schema.name} ?publisherName .
+ ?dataset ${rdf.ns.dcat.theme} ?euTheme .
}
WHERE {
?dataset ?p ?o .

- ?dataset ${ns.dcterms.creator} ${organizationId} .
- ?dataset ${ns.schema.workExample} <https://ld.admin.ch/application/opendataswiss> .
- ?dataset ${ns.schema.creativeWorkStatus} <https://ld.admin.ch/vocabulary/CreativeWorkStatus/Published> .
+ ?dataset ${rdf.ns.dcterms.creator} ${organizationId} .
+ ?dataset ${rdf.ns.schema.workExample} <https://ld.admin.ch/application/opendataswiss> .
+ ?dataset ${rdf.ns.schema.creativeWorkStatus} <https://ld.admin.ch/vocabulary/CreativeWorkStatus/Published> .

- FILTER ( NOT EXISTS { ?dataset ${ns.schema.validThrough} ?expiration1 . } )
- FILTER ( NOT EXISTS { ?dataset ${ns.schema.expires} ?expiration2 . } )
+ FILTER ( NOT EXISTS { ?dataset ${rdf.ns.schema.validThrough} ?expiration1 . } )
+ FILTER ( NOT EXISTS { ?dataset ${rdf.ns.schema.expires} ?expiration2 . } )

OPTIONAL {
?o ?nestedP ?nestedO .
FILTER( ?nestedP != <https://cube.link/observation> )
}

OPTIONAL {
- ?dataset ${ns.dcterms.rights} ?copyright .
+ ?dataset ${rdf.ns.dcterms.rights} ?copyright .
GRAPH ?copyrightGraph {
- ?copyright ${ns.schema.identifier} ?copyrightIdentifier .
+ ?copyright ${rdf.ns.schema.identifier} ?copyrightIdentifier .
}
}

OPTIONAL {
- ?dataset ${ns.dcterms.accrualPeriodicity} ?accrualPeriodicity .
+ ?dataset ${rdf.ns.dcterms.accrualPeriodicity} ?accrualPeriodicity .
}

OPTIONAL {
- ?dataset ${ns.dcterms.publisher} ?publisher .
- ?publisher ${ns.schema.name} ?publisherName .
+ ?dataset ${rdf.ns.dcterms.publisher} ?publisher .
+ ?publisher ${rdf.ns.schema.name} ?publisherName .
}

OPTIONAL {
- ?dataset ${ns.dcat.theme} ?theme .
- ?theme ${ns.schema.supersededBy}?/${ns.schema.sameAs} ?euTheme .
+ ?dataset ${rdf.ns.dcat.theme} ?theme .
+ ?theme ${rdf.ns.schema.supersededBy}?/${rdf.ns.schema.sameAs} ?euTheme .
}

- FILTER (?p != ${ns.dcat.theme})
+ FILTER (?p != ${rdf.ns.dcat.theme})
}
`
}
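
All of the interpolated values above are RDF/JS named nodes; the `sparql` tagged template from `@tpluscode/rdf-string` serializes them into SPARQL syntax, which is why swapping `ns.*` for `rdf.ns.*` leaves the generated query text unchanged. A minimal standalone sketch of that pattern (illustrative, not code from this PR):

```js
import { sparql } from '@tpluscode/rdf-string'
import rdf from '@zazuko/env'

// Interpolated named nodes are serialized as proper SPARQL terms,
// so the surrounding code never concatenates raw IRI strings.
const query = sparql`
  SELECT ?name WHERE {
    ?dataset ${rdf.ns.schema.name} ?name .
  }
`

console.log(query.toString())
```
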
77 changes: 38 additions & 39 deletions packages/ckan/src/xml.js
@@ -4,7 +4,6 @@ import rdf from '@zazuko/env'
import prefixes, { shrink } from '@zazuko/prefixes'
import { create as createXml } from 'xmlbuilder2'
import { isBlankNode, isLiteral, isNamedNode } from 'is-graph-pointer'
- import * as ns from './namespace.js'

/**
* Generate a CKAN-compatible XML representation of the dataset.
@@ -14,7 +13,7 @@ import * as ns from './namespace.js'
*/
const toXML = (dataset) => {
const pointer = rdf.clownface({ dataset: rdf.dataset(dataset) })
- const datasetsPointer = pointer.node(ns.dcat.Dataset).in(ns.rdf.type)
+ const datasetsPointer = pointer.node(rdf.ns.dcat.Dataset).in(rdf.ns.rdf.type)

const pf = Object.entries(prefixes)
// `xml` prefix is reserved and must not be re-declared
@@ -37,27 +36,27 @@
'dcat:Catalog': {
'dcat:dataset': datasetsPointer.map((dataset) => {
// Verify that identifiers is CKAN-valid, ignore the dataset otherwise
- const identifiers = dataset.out(ns.dcterms.identifier)
+ const identifiers = dataset.out(rdf.ns.dcterms.identifier)
if (!identifiers.value) {
// eslint-disable-next-line no-console
console.error(`Ignoring dataset ${dataset.value} because it has no or multiple identifiers`)
return null
}

// The initial query ensures that there is a creator
- const creators = dataset.out(ns.dcterms.creator)
+ const creators = dataset.out(rdf.ns.dcterms.creator)
const creatorSlug = creators.values[0].split('/').slice(-1)[0]
const identifier = identifiers.value.includes('@')
? identifiers.value
: `${identifiers.value}@${creatorSlug}`

// Ignore keywords without a language specified because CKAN rejects them
// @ts-ignore
- const keywords = dataset.out(ns.dcat.keyword).filter(({ term: { language } }) => !!language)
+ const keywords = dataset.out(rdf.ns.dcat.keyword).filter(({ term: { language } }) => !!language)

- const copyright = dataset.out(ns.dcterms.rights).out(ns.schema.identifier)
+ const copyright = dataset.out(rdf.ns.dcterms.rights).out(rdf.ns.schema.identifier)

- const legalBasisPointer = dataset.out(ns.dcterms.license)
+ const legalBasisPointer = dataset.out(rdf.ns.dcterms.license)
const legalBasis = legalBasisPointer.term
? {
'rdf:Description': {
Expand All @@ -67,45 +66,45 @@ const toXML = (dataset) => {
}
: null

- const workExampleDstributions = dataset.out(ns.schema.workExample)
- .filter(workExample => workExample.out(ns.schema.encodingFormat).terms.length > 0)
+ const workExampleDstributions = dataset.out(rdf.ns.schema.workExample)
+ .filter(workExample => workExample.out(rdf.ns.schema.encodingFormat).terms.length > 0)
.map(workExample => ({
'dcat:Distribution': {
- '@': { 'rdf:about': workExample.out(ns.schema.url).value },
- 'dcterms:issued': serializeTerm(dataset.out(ns.dcterms.issued)),
- 'dcat:mediaType': serializeTerm(workExample.out(ns.schema.encodingFormat)),
- 'dcat:accessURL': serializeTerm(workExample.out(ns.schema.url)),
- 'dcterms:title': serializeTerm(workExample.out(ns.schema.name)),
+ '@': { 'rdf:about': workExample.out(rdf.ns.schema.url).value },
+ 'dcterms:issued': serializeTerm(dataset.out(rdf.ns.dcterms.issued)),
+ 'dcat:mediaType': serializeTerm(workExample.out(rdf.ns.schema.encodingFormat)),
+ 'dcat:accessURL': serializeTerm(workExample.out(rdf.ns.schema.url)),
+ 'dcterms:title': serializeTerm(workExample.out(rdf.ns.schema.name)),
'dcterms:license': serializeTerm(copyright),
'dcterms:format': {
'@': {
- 'rdf:resource': distributionFormatFromEncoding(workExample.out(ns.schema.encodingFormat)),
+ 'rdf:resource': distributionFormatFromEncoding(workExample.out(rdf.ns.schema.encodingFormat)),
},
},
},
}))

- const copiedDistributions = dataset.out(ns.dcat.distribution)
+ const copiedDistributions = dataset.out(rdf.ns.dcat.distribution)
.map((distribution, index) => ({
'dcat:Distribution': {
'@': { 'rdf:about': `${dataset.value}/distribution/${index + 1}` },
- 'dcterms:issued': serializeTerm(dataset.out(ns.dcterms.issued)),
- 'dcterms:modified': serializeTerm(dataset.out(ns.dcterms.modified)),
+ 'dcterms:issued': serializeTerm(dataset.out(rdf.ns.dcterms.issued)),
+ 'dcterms:modified': serializeTerm(dataset.out(rdf.ns.dcterms.modified)),
'dcterms:license': serializeTerm(copyright),
...serializeProperties(distribution),
},
}))

- const publishers = dataset.out(ns.dcterms.publisher)
+ const publishers = dataset.out(rdf.ns.dcterms.publisher)
.map(publisher => {
const attr = {}
/** @type {string | string[]} */
let name = publisher.value

if (isNamedNode(publisher)) {
attr['rdf:about'] = publisher.value
- if (publisher.out(ns.schema.name).values.length > 0) {
- name = publisher.out(ns.schema.name).values
+ if (publisher.out(rdf.ns.schema.name).values.length > 0) {
+ name = publisher.out(rdf.ns.schema.name).values
}
}

@@ -121,7 +120,7 @@ const toXML = (dataset) => {
// The query makes sure we get both legacy and new ones, we only
// provide the new ones to CKAN, by converting legacy ones if needed.
const euFreqPrefix = 'http://publications.europa.eu/resource/authority/frequency/'
- const accrualPeriodicity = dataset.out(ns.dcterms.accrualPeriodicity)
+ const accrualPeriodicity = dataset.out(rdf.ns.dcterms.accrualPeriodicity)
.map((t) => {
if (!t.term || !t.term.value) {
return t
@@ -136,34 +135,34 @@ const toXML = (dataset) => {
'dcat:Dataset': {
'@': { 'rdf:about': dataset.value },
'dcterms:identifier': { '#': identifier },
- 'dcterms:title': serializeTerm(dataset.out(ns.dcterms.title)),
- 'dcterms:description': serializeTerm(dataset.out(ns.dcterms.description)),
- 'dcterms:issued': serializeTerm(dataset.out(ns.dcterms.issued)),
- 'dcterms:modified': serializeTerm(dataset.out(ns.dcterms.modified)),
+ 'dcterms:title': serializeTerm(dataset.out(rdf.ns.dcterms.title)),
+ 'dcterms:description': serializeTerm(dataset.out(rdf.ns.dcterms.description)),
+ 'dcterms:issued': serializeTerm(dataset.out(rdf.ns.dcterms.issued)),
+ 'dcterms:modified': serializeTerm(dataset.out(rdf.ns.dcterms.modified)),
'dcterms:publisher': publishers,
'dcterms:creator': serializeTerm(creators),
'dcat:contactPoint': serializeBlankNode(
- dataset.out(ns.dcat.contactPoint),
- [ns.vcard.Organization, ns.vcard.Individual],
+ dataset.out(rdf.ns.dcat.contactPoint),
+ [rdf.ns.vcard.Organization, rdf.ns.vcard.Individual],
),
- 'dcat:theme': serializeTerm(dataset.out(ns.dcat.theme)),
- 'dcterms:language': serializeTerm(dataset.out(ns.dcterms.language)),
+ 'dcat:theme': serializeTerm(dataset.out(rdf.ns.dcat.theme)),
+ 'dcterms:language': serializeTerm(dataset.out(rdf.ns.dcterms.language)),
'dcterms:relation': [
legalBasis,
- serializeTerm(dataset.out(ns.dcterms.relation), { properties: [ns.rdfs.label] }),
+ serializeTerm(dataset.out(rdf.ns.dcterms.relation), { properties: [rdf.ns.rdfs.label] }),
],
'dcat:keyword': serializeTerm(keywords),
- 'dcat:landingPage': serializeTerm(dataset.out(ns.dcat.landingPage)),
- 'dcterms:spatial': serializeTerm(dataset.out(ns.dcterms.spatial)),
- 'dcterms:coverage': serializeTerm(dataset.out(ns.dcterms.coverage)),
- 'dcterms:temporal': serializeTerm(dataset.out(ns.dcterms.temporal)),
+ 'dcat:landingPage': serializeTerm(dataset.out(rdf.ns.dcat.landingPage)),
+ 'dcterms:spatial': serializeTerm(dataset.out(rdf.ns.dcterms.spatial)),
+ 'dcterms:coverage': serializeTerm(dataset.out(rdf.ns.dcterms.coverage)),
+ 'dcterms:temporal': serializeTerm(dataset.out(rdf.ns.dcterms.temporal)),
// @ts-ignore
'dcterms:accrualPeriodicity': serializeTerm(accrualPeriodicity),
'dcat:distribution': [
...workExampleDstributions,
...copiedDistributions,
],
- 'foaf:page': serializeTerm(dataset.out(ns.foaf.page)),
+ 'foaf:page': serializeTerm(dataset.out(rdf.ns.foaf.page)),
},
}
}).filter(Boolean),
@@ -201,7 +200,7 @@ const serializeLiteral = (pointer) => {
attrs['xml:lang'] = term.language
}

- if (term.datatype && !term.datatype.equals(ns.rdf.langString) && !term.datatype.equals(ns.xsd.string)) {
+ if (term.datatype && !term.datatype.equals(rdf.ns.rdf.langString) && !term.datatype.equals(rdf.ns.xsd.string)) {
attrs['rdf:datatype'] = term.datatype.value
}

@@ -251,7 +250,7 @@ const serializeBlankNode = (pointer, allowedTypesArr = []) => {
if (!isBlankNode(pointer)) return null

const allowedTypes = rdf.termSet(allowedTypesArr)
- const types = pointer.out(ns.rdf.type).terms
+ const types = pointer.out(rdf.ns.rdf.type).terms
const type = types.find((term) => !allowedTypes.size || allowedTypes.has(term))

if (!type) return {}
@@ -264,7 +263,7 @@ const serializeBlankNode = (pointer, allowedTypesArr = []) => {
function serializeProperties (pointer) {
const properties = rdf.termSet([...pointer.dataset.match(pointer.term)]
.map(({ predicate }) => predicate)
- .filter((term) => !term.equals(ns.rdf.type)))
+ .filter((term) => !term.equals(rdf.ns.rdf.type)))

return [...properties].reduce((acc, property) =>
({ ...acc, [shrink(property.value)]: serializeTerm(pointer.out(property)) }), {})
2 changes: 1 addition & 1 deletion packages/core/lib/sparql.js
@@ -240,7 +240,7 @@ export const generateClient = (sparqlEndpoint, options) => {
const query = async (query, options = {}) => {
const isAsk = options && options.ask
const isSelect = options && options.select
- const headers = options && options.headers
+ const headers = (options && options.headers) || {}
const rewriteResponse = (options && options.rewriteResponse) || []

if (isAsk) {
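
Defaulting `headers` to an empty object lets downstream code read or iterate it without first checking for `undefined`. A small, hypothetical illustration of the difference (`buildHeaders` is not part of the codebase):

```js
const buildHeaders = (options = {}) => {
  // Without the `|| {}` default, `headers` is undefined when the caller
  // passes no headers, and Object.entries(headers) below throws a TypeError.
  const headers = (options && options.headers) || {}

  const merged = { accept: 'application/sparql-results+json' }
  for (const [key, value] of Object.entries(headers)) {
    merged[key.toLowerCase()] = value
  }
  return merged
}

console.log(buildHeaders()) // { accept: 'application/sparql-results+json' }
console.log(buildHeaders({ headers: { Authorization: 'Bearer abc123' } }))
```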