Bundling Fourriére et al. 2016 to a DwC Archive

This is an R Markdown Notebook for converting the species checklist found in the following reference to DarwinCore format for upload into OBIS as part of UNESCO’s eDNA Expeditions project:

Fourriére M, Reyes-Bonilla H, Ayala-Bocos A, Ketchum JA, Chávez-Comparan JC. Checklist and analysis of completeness of the reef fish fauna of the Revillagigedo Archipelago, Mexico. Zootaxa. 2016 Aug 15;4150(4):436-66.

Setup

Load the necessary libraries and define the input variables. Loading messages are suppressed.

library(magrittr)                       # To use %<>% pipes
suppressMessages(library(janitor))      # To clean input data
suppressMessages(library(dplyr))        # To clean input data
library(stringr)                        # To clean input data
suppressMessages(library(rgnparser))    # To clean species names
suppressMessages(library(taxize))       # To get WoRMS IDs
library(worrms)                         # To get WoRMS IDs
library(digest)                         # To generate hashes
suppressMessages(library(obistools))    # To generate centroid lat/long and uncertainty
suppressMessages(library(sf))           # To generate wkt polygon
suppressMessages(library(EML))          # To create eml.xml file
library(xml2)                           # To create the meta.xml file
suppressMessages(library(zip))          # To zip DwC file

Input Parameters and Paths

path_to_project_root <- "../../.."
site_dir_name <- "archipielago_de_revillagigedo"
dataset_dir_name <- "Forriere_et_al_2016"
original_pdf <- "23431-Article_Text-64444-77378-10-20160815_5-25_rotated.pdf"
short_name <- "revillagigedo-forriere-2016"

Parsing PDF table to CSV

The data for this reference are formatted as an image-based table spanning multiple pages of the PDF. First, we use pdf_to_table to OCR the pages and parse the table out to a CSV.

#conda environment
condaenv <- "mwhs-data-mobilization"

# Path to the Python script
script <- paste(path_to_project_root, "scripts_data/pdf_to_tables/pdf_to_table.py", sep="/")

# Input PDF file path
input_pdf <- paste(path_to_project_root, "datasets", site_dir_name, dataset_dir_name, "raw", original_pdf, sep="/")

# Output directory for OCR/table files
output_dir <- paste(path_to_project_root, "datasets", site_dir_name, dataset_dir_name, "processed", sep="/")

# Define page numbers and table areas (see documentation)
page_args <- c(
"-a 169.979,106.829,528.881,317.329 -p 1",
"-a 114.196,106.829,521.514,316.276 -p 2",
"-a 111.039,106.829,521.514,316.276 -p 3",
"-a 122.616,106.829,521.514,316.276 -p 4",
"-a 109.986,89.989,521.514,316.276 -p 5",
"-a 114.196,106.829,521.514,316.276 -p 6",
"-a 113.144,106.829,521.514,316.276 -p 7",
"-a 121.564,103.671,514.146,316.276 -p 8",
"-a 114.196,106.829,521.514,316.276 -p 9",
"-a 108.934,100.514,527.829,338.379 -p 10",
"-a 108.934,106.829,525.724,316.276 -p 11",
"-a 132.089,106.829,490.991,316.276 -p 12",
"-a 180.504,106.829,521.514,316.276 -p 13",
"-a 119.459,106.829,504.674,316.276 -p 14",
"-a 123.669,106.829,506.779,316.276 -p 15",
"-a 122.616,106.829,507.831,316.276 -p 16",
"-a 118.406,108.934,504.674,333.116 -p 17",
"-a 118.406,101.566,516.251,316.276 -p 18",
"-a 117.354,106.829,516.251,316.276 -p 19",
"-a 119.459,106.829,517.304,316.276 -p 20",
"-a 144.719,106.829,484.676,316.276 -p 21"

)

# Define run parameters (see documentation)
run_parameters <- "-s -c -ocr -# 8 -f -nh"

# Combine page arguments and execute
page_args_combined <- paste(page_args, collapse = " ")
command <- paste("conda run -n", condaenv, "python", script, "-i", input_pdf, run_parameters, page_args_combined, "-o", output_dir)
system(command, intern=TRUE)
##  [1] ""                                                                                                                                                                              
##  [2] "Script Execution Summary"                                                                                                                                                      
##  [3] "Date and Time: 2023-09-15 04:52:41"                                                                                                                                            
##  [4] "------------------------------"                                                                                                                                                
##  [5] ""                                                                                                                                                                              
##  [6] "PDF input: ../../../datasets/archipielago_de_revillagigedo/Forriere_et_al_2016/raw/23431-Article_Text-64444-77378-10-20160815_5-25_rotated.pdf"                                
##  [7] "Perform OCR: True"                                                                                                                                                             
##  [8] "Number of Cores: 8"                                                                                                                                                            
##  [9] "Perform Table Parsing: TRUE"                                                                                                                                                   
## [10] "Selected Areas:"                                                                                                                                                               
## [11] "  Area 1: [169.979, 106.829, 528.881, 317.329]"                                                                                                                                
## [12] "  Area 2: [114.196, 106.829, 521.514, 316.276]"                                                                                                                                
## [13] "  Area 3: [111.039, 106.829, 521.514, 316.276]"                                                                                                                                
## [14] "  Area 4: [122.616, 106.829, 521.514, 316.276]"                                                                                                                                
## [15] "  Area 5: [109.986, 89.989, 521.514, 316.276]"                                                                                                                                 
## [16] "  Area 6: [114.196, 106.829, 521.514, 316.276]"                                                                                                                                
## [17] "  Area 7: [113.144, 106.829, 521.514, 316.276]"                                                                                                                                
## [18] "  Area 8: [121.564, 103.671, 514.146, 316.276]"                                                                                                                                
## [19] "  Area 9: [114.196, 106.829, 521.514, 316.276]"                                                                                                                                
## [20] "  Area 10: [108.934, 100.514, 527.829, 338.379]"                                                                                                                               
## [21] "  Area 11: [108.934, 106.829, 525.724, 316.276]"                                                                                                                               
## [22] "  Area 12: [132.089, 106.829, 490.991, 316.276]"                                                                                                                               
## [23] "  Area 13: [180.504, 106.829, 521.514, 316.276]"                                                                                                                               
## [24] "  Area 14: [119.459, 106.829, 504.674, 316.276]"                                                                                                                               
## [25] "  Area 15: [123.669, 106.829, 506.779, 316.276]"                                                                                                                               
## [26] "  Area 16: [122.616, 106.829, 507.831, 316.276]"                                                                                                                               
## [27] "  Area 17: [118.406, 108.934, 504.674, 333.116]"                                                                                                                               
## [28] "  Area 18: [118.406, 101.566, 516.251, 316.276]"                                                                                                                               
## [29] "  Area 19: [117.354, 106.829, 516.251, 316.276]"                                                                                                                               
## [30] "  Area 20: [119.459, 106.829, 517.304, 316.276]"                                                                                                                               
## [31] "  Area 21: [144.719, 106.829, 484.676, 316.276]"                                                                                                                               
## [32] "Pages: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21"                                                                                              
## [33] "Concatenate: True"                                                                                                                                                             
## [34] "Concatenate across headers: True"                                                                                                                                              
## [35] "Stream Extraction: True"                                                                                                                                                       
## [36] "Lattice Extraction: False"                                                                                                                                                     
## [37] ""                                                                                                                                                                              
## [38] "OCRing PDF"                                                                                                                                                                    
## [39] "------------------------------"                                                                                                                                                
## [40] ""                                                                                                                                                                              
## [41] ""                                                                                                                                                                              
## [42] "Parsing Tables"                                                                                                                                                                
## [43] "------------------------------"                                                                                                                                                
## [44] ""                                                                                                                                                                              
## [45] ""                                                                                                                                                                              
## [46] "Saving to CSV"                                                                                                                                                                 
## [47] "CSV file: ../../../datasets/archipielago_de_revillagigedo/Forriere_et_al_2016/processed/23431-Article_Text-64444-77378-10-20160815_5-25_rotated_tables_parsed_concatenated.csv"
## [48] "------------------------------"                                                                                                                                                
## [49] ""                                                                                                                                                                              
## [50] ""                                                                                                                                                                              
## [51] "Run Details: ../../../datasets/archipielago_de_revillagigedo/Forriere_et_al_2016/processed/23431-Article_Text-64444-77378-10-20160815_5-25_rotated_parameters.txt"             
## [52] "Finished"                                                                                                                                                                      
## [53] ""

Read source data

Now we’ll read in the CSV table output by the previous step.

processed_csv <- "23431-Article_Text-64444-77378-10-20160815_5-25_rotated_tables_parsed_concatenated.csv"

input_data <- read.csv(paste(path_to_project_root, "datasets", site_dir_name, dataset_dir_name, "processed", processed_csv, sep="/"))

#to preview pretty table
knitr::kable(head(input_data))
X0 X1 X2 X3 X4
Phylum CHORDATA Unnamed: 1
Clase CHONDRICHTHYES
Order LAMNIFORMES
Family Lamnidae
Carcharodon carcharias (Linnaeus,1758)
Order CARCHARHINIFORMES

Preprocessing

Here we tidy the data, since OCR and table-parsing errors are common, and keep only the list of species, since this is a checklist.

Tidy Data

input_data %<>%
  remove_empty(c("rows", "cols")) %>%       # Remove empty rows and columns
  clean_names()

# Remove Phylum, Class, Order and Family rows and keep the first column only
cleaned_data <- input_data %>%              
     mutate(across(everything(), ~ if_else(str_detect(.x, "Phylum|Family|Order|Clase"), "", .x))) %>%
     filter(x0 != "") %>%
     select(c(x0))

#to preview pretty table
knitr::kable(head(cleaned_data))
x0
Carcharodon carcharias (Linnaeus,1758)
Carcharhinus albimarginatus (Riippell, 1837)
Carcharhinus galapagensis (Snodgrass & Heller,
Carcharhinus leucas (Miller & Henle, 1839)
Carcharhinus limbatus (Miller & Henle, 1839)
Galeocerdo cuvier (Péron & Lesueur, 1822)

Get WoRMS IDs

Auto matching

First we try to do this automatically by cleaning the species names using gnparser and then querying the WoRMS database via the taxize library.

#Parse author names out
parsed_names <- rgnparser::gn_parse(cleaned_data[,])

#Function to get WoRMS IDs. Search for accepted names first and, if not found, search again including unaccepted names. If still not found, return NA.
get_worms_id_from_element <- function(element) {
  worms_id <- get_wormsid(element$canonical$full, searchtype="scientific", fuzzy=TRUE, messages = FALSE, accepted = TRUE)
  if (attr(worms_id, "match") == "not found") {
    worms_id <- get_wormsid(element$canonical$full, searchtype="scientific", messages = FALSE, fuzzy=TRUE)
    if (attr(worms_id, "match") == "not found") {
      worms_id <- NA
    }
  }
  return(worms_id)
}

#Call the function
worms_ids <- lapply(parsed_names, function(element) {
  if (element$parsed) {
    return(get_worms_id_from_element(element))
  } else {
    return(NA)
  }
})

#combine original names, parsed data and WoRMS ID into one data frame
combined_dataframe <- data.frame()

for (i in 1:nrow(cleaned_data)) {
  cleaned_value <- cleaned_data[i,]
  canonical_value <- parsed_names[[i]]$canonical$full
  worms_id_value <- worms_ids[[i]][1]
  if (is.null(canonical_value)){
    canonical_value <- NA
  }
  temp_row <- data.frame(CleanedData = cleaned_value, CanonicalFull = canonical_value, WormsIDs = worms_id_value)
  combined_dataframe <- rbind(combined_dataframe, temp_row)
}

knitr::kable(head(combined_dataframe))
CleanedData                                      CanonicalFull                 WormsIDs
Carcharodon carcharias (Linnaeus,1758)           Carcharodon carcharias        105838
Carcharhinus albimarginatus (Riippell, 1837)     Carcharhinus albimarginatus   217352
Carcharhinus galapagensis (Snodgrass & Heller,   Carcharhinus galapagensis     105790
Carcharhinus leucas (Miller & Henle, 1839)       Carcharhinus leucas           105792
Carcharhinus limbatus (Miller & Henle, 1839)     Carcharhinus limbatus         105793
Galeocerdo cuvier (Péron & Lesueur, 1822)        Galeocerdo cuvier             105799

Human Verification

Sometimes there are misspellings in the original text or incorrect OCR that can be searched for and fixed by hand. To do this, view the combined dataframe, search WoRMS for the unmatched species and add their IDs, and remove any rows that were not removed automatically in the earlier cleaning steps.
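A quick way to see which rows still need attention is to filter for missing WoRMS IDs (an optional sketch, not part of the original workflow; it assumes unmatched names were left as NA in WormsIDs):

#Optional: list names that were not matched automatically
combined_dataframe %>% filter(is.na(WormsIDs))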

combined_dataframe[121, c("CanonicalFull", "identificationQualifier", "WormsIDs")] <- c("Pareques", "species A", 270286)
combined_dataframe[171, c("CanonicalFull", "identificationQualifier", "WormsIDs")] <- c("Xyrichtys", "species B", 126025)

Darwin Core mapping

Required Terms

OBIS currently has eight required DwC terms: scientificName, scientificNameID, occurrenceID, eventDate, decimalLongitude, decimalLatitude, occurrenceStatus, basisOfRecord.

scientificName/scientificNameID

Create a dataframe with unique taxa only (though this should already be unique). This will be our primary DarwinCore data frame.

#rename and restructure WoRMSIDs to OBIS requirements
occurrence <- combined_dataframe %>%
  distinct(CanonicalFull, identificationQualifier, WormsIDs) %>%
  rename(scientificName = CanonicalFull) %>%
  rename(scientificNameID = WormsIDs) %>%
  mutate(scientificNameID = ifelse(!is.na(scientificNameID), paste("urn:lsid:marinespecies.org:taxname:", scientificNameID, sep = ""), NA))

occurrenceID

occurrenceID is an identifier for the occurrence record and should be persistent and globally unique. It is a combination of dataset-shortname:occurrence: and an MD5 hash of the scientific name (together with the identificationQualifier, where present).

# Vectorize the digest function (The digest() function isn't vectorized. So if you pass in a vector, you get one value for the whole vector rather than a digest for each element of the vector):
vdigest <- Vectorize(digest)

# Generate occurrenceID:
occurrence %<>% mutate(occurrenceID = paste(short_name, "occurrence", vdigest(paste(scientificName, identificationQualifier), algo="md5"), sep=":"))
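
Because occurrenceID must be globally unique within the dataset, a simple sanity check (optional, not part of the original workflow) is to confirm that no generated IDs collide:

#Optional: stop if any generated occurrenceID is duplicated
stopifnot(!any(duplicated(occurrence$occurrenceID)))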

eventDate

This is left empty since this is technically a checklist and we do not know the collection dates.

eventDate <- ""
occurrence %<>% mutate(eventDate)

decimalLongitude/decimalLatitude

Use obistools::calculate_centroid to calculate a centroid and radius for WKT strings. This is useful for populating decimalLongitude, decimalLatitude and coordinateUncertaintyInMeters. The WKT strings are from https://github.com/iobis/mwhs-shapes.

if (!file.exists(paste(path_to_project_root, "scripts_data/marine_world_heritage.gpkg", sep="/"))) {
  download.file("https://github.com/iobis/mwhs-shapes/blob/master/output/marine_world_heritage.gpkg?raw=true", paste(path_to_project_root, "scripts_data/marine_world_heritage.gpkg", sep="/"))
}

shapes <- st_read(paste(path_to_project_root, "scripts_data/marine_world_heritage.gpkg", sep="/"))
## Reading layer `marine_world_heritage' from data source 
##   `/mnt/c/Users/Chandra Earl/Desktop/Labs/UNESCO/mwhs-data-mobilization/scripts_data/marine_world_heritage.gpkg' 
##   using driver `GPKG'
## Simple feature collection with 60 features and 4 fields
## Geometry type: MULTIPOLYGON
## Dimension:     XY
## Bounding box:  xmin: -180 ymin: -55.32282 xmax: 180 ymax: 71.81381
## Geodetic CRS:  4326
#For some sites, the GeoPackage has core as well as buffer areas. Merge the geometries by site.
shapes_processed <- shapes %>%
  group_by(name) %>%
  summarize()

#Archipiélago de Revillagigedo
ind_shape <- shapes_processed$geom[which(shapes_processed$name == "Archipiélago de Revillagigedo")]


#convert shape to WKT
wkt <- st_as_text(ind_shape, digits = 6)

localities <- calculate_centroid(wkt)

occurrence %<>% mutate(decimalLatitude = localities$decimalLatitude)
occurrence %<>% mutate(decimalLongitude = localities$decimalLongitude)
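
To inspect the computed centroid and its uncertainty (optional; this simply reuses the preview pattern from above):

#to preview centroid and uncertainty
knitr::kable(localities)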

occurrenceStatus

occurrenceStatus <- "present"
occurrence %<>% mutate(occurrenceStatus)

basisOfRecord

basisOfRecord <- "HumanObservation"
occurrence %<>% mutate(basisOfRecord)

Extra Terms

footprintWKT

occurrence %<>% mutate(footprintWKT = wkt)

coordinateUncertaintyInMeters

occurrence %<>% mutate(coordinateUncertaintyInMeters = localities$coordinateUncertaintyInMeters)

geodeticDatum

geodeticDatum <- "WGS84"
occurrence %<>% mutate(geodeticDatum)

country

country <- "Mexico"
occurrence %<>% mutate(country)

locality

locality <- "Archipiélago de Revillagigedo"
occurrence %<>% mutate(locality)

Post-processing

Check data

Use the check_fields command from obistools to check if all OBIS required fields are present in an occurrence table and if any values are missing.

#Reorganize columns
occurrence = occurrence %>% select(occurrenceID, scientificName, identificationQualifier,scientificNameID, eventDate, country, locality, decimalLatitude, decimalLongitude, coordinateUncertaintyInMeters, footprintWKT, geodeticDatum, occurrenceStatus, basisOfRecord)

#Check fields
check_fields(occurrence)
## Warning: `data_frame()` was deprecated in tibble 1.1.0.
## ℹ Please use `tibble()` instead.
## ℹ The deprecated feature was likely used in the obistools package.
##   Please report the issue to the authors.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## # A tibble: 390 × 4
##    level field       row message                                 
##    <chr> <chr>     <int> <chr>                                   
##  1 error eventDate     1 Empty value for required field eventDate
##  2 error eventDate     2 Empty value for required field eventDate
##  3 error eventDate     3 Empty value for required field eventDate
##  4 error eventDate     4 Empty value for required field eventDate
##  5 error eventDate     5 Empty value for required field eventDate
##  6 error eventDate     6 Empty value for required field eventDate
##  7 error eventDate     7 Empty value for required field eventDate
##  8 error eventDate     8 Empty value for required field eventDate
##  9 error eventDate     9 Empty value for required field eventDate
## 10 error eventDate    10 Empty value for required field eventDate
## # ℹ 380 more rows
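
The eventDate errors above are expected: eventDate was intentionally left empty because this checklist has no collection dates.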

Create the EML file

This is a file which contains the dataset’s metadata and is required in a DarwinCore-Archive.

emld::eml_version("eml-2.1.1")
## [1] "eml-2.1.1"
#Title
title <- "Checklist and analysis of completeness of the reef fish fauna of the Revillagigedo Archipelago, Mexico: Fishes Checklist"

#AlternateIdentifier
alternateIdentifier <- paste("https://ipt.obis.org/secretariat/resource?r=", short_name, sep="")

#Abstract
abstract <- eml$abstract(
  para = "This paper presents an updated checklist of cartilaginous and bony fishes from the Revillagigedo Archipelago reefs and nearby areas (Tropical Eastern Pacific). To compile this list, we gathered data from field surveys between 1994 and 2015, from an exhaustive literature review, and by consulting museum collections and databases. With these records we estimated the completeness of the local fish inventory using four non-parametric rarefaction methods. We report a total of 389 species in 102 families; 235 of these are reef fish that occur in the Eastern but also in the Central Pacific, and 13 species were identified as endemic to the archipelago. A non-parametric statistical model predicts that the expected number of reef fish present at Revillagigedo should be 244.3 ± 3.2 species, which is 9 species more than the observed richness, and this difference was statistically significant (p = 0.02). That predictive model estimates that about 96% of the total richness of reef fish from the archipelago is known. Comparisons of the completeness of the inventory at Revillagigedo to that reported for the fish fauna of the Eastern Pacific and worldwide, showed that the quality of the sampling effort is remarkably high, in spite of the geographic isolation of the archipelago."
)

People

Here we add the people involved in the project:

The creator is the person or organization responsible for creating the resource itself.

The contact is the person or institution to contact with questions about the use or interpretation of the data set.

The metadataProvider is the person responsible for providing the metadata documentation for the resource.

The associatedParty (in this case the Data Curator) is the person who mobilized the data from the original resource.

creator <- list(eml$creator(
    individualName = eml$individualName(
      givenName = "Manon", 
      surName = "Fourriére"),
    organizationName = "Universidad Autónoma de Baja California Sur"
  ), eml$creator(
    individualName = eml$individualName(
      givenName = "HÉctor", 
      surName = "Reyes-Bonilla"),
    organizationName = "Universidad Autónoma de Baja California Sur"
  ), eml$creator(
    individualName = eml$individualName(
      givenName = "Arturo", 
      surName = "Ayala-Bocos"),
    organizationName = "Ecosistemas y Conservación: Proazul Terrestre"
  ), eml$creator(
    individualName = eml$individualName(
      givenName = "James", 
      surName = "Ketchum"),
    organizationName = "Pelagios-Kakunjá"
  ), eml$creator(
    individualName = eml$individualName(
      givenName = "Juan Carlos", 
      surName = "Chávez-Comparan"),
    organizationName = "Universidad de Colima"
  )
)


contact <- eml$creator(
  individualName = eml$individualName(
    givenName = "OBIS", 
    surName = "Secretariat"),
  electronicMailAddress = "helpdesk@obis.org",
  organizationName = "OBIS",
  positionName = "Secretariat"
)

metadataProvider <- eml$metadataProvider(
  individualName = eml$individualName(
    givenName = "Chandra", 
    surName = "Earl"),
  electronicMailAddress = "c.earl@unesco.org",
  organizationName = "UNESCO",
  positionName = "eDNA Scientific Officer"
)

associatedParty <- eml$associatedParty(
  role = "processor",
  individualName = eml$individualName(
    givenName = "Chandra", 
    surName = "Earl"),
  electronicMailAddress = "c.earl@unesco.org",
  organizationName = "UNESCO",
  positionName = "eDNA Scientific Officer"
)

Additional Metadata

Here we add the additionalMetadata element, which is required for a GBIF-type EML file and contains information such as the citation of the dataset, the citation of the original resource and the creation timestamp of the EML.

#{dataset.authors} ({dataset.pubDate}) {dataset.title}. [Version {dataset.version}]. {organization.title}. {dataset.type} Dataset {dataset.doi}, {dataset.url}

additionalMetadata <- eml$additionalMetadata(
  metadata = list(
    gbif = list(
      dateStamp = paste0(format(Sys.time(), "%Y-%m-%dT%H:%M:%OS3"), paste0(substr(format(Sys.time(), "%z"), 1, 3), ":", paste0(substr(format(Sys.time(), "%z"), 4, 5)))),
      hierarchyLevel = "dataset",
      citation = "IPT will autogenerate this",
      bibliography = list(
        citation = "Fourriére M, Reyes-Bonilla H, Ayala-Bocos A, Ketchum JA, Chávez-Comparan JC. Checklist and analysis of completeness of the reef fish fauna of the Revillagigedo Archipelago, Mexico. Zootaxa. 2016 Aug 15;4150(4):436-66.")
    )
  )
)

citationdoi <- "https://doi.org/10.11646/zootaxa.4150.4.4"

Coverage

Here we describe the dataset’s geographic, taxonomic and temporal coverage.

#Coverage
coverage <- eml$coverage(
  geographicCoverage = eml$geographicCoverage(
    geographicDescription = "Archipiélago de Revillagigedo",
    boundingCoordinates = eml$boundingCoordinates(
      westBoundingCoordinate = st_bbox(ind_shape)$xmin,
      eastBoundingCoordinate = st_bbox(ind_shape)$xmax,
      northBoundingCoordinate = st_bbox(ind_shape)$ymax,
      southBoundingCoordinate = st_bbox(ind_shape)$ymin)
    ),
  taxonomicCoverage = eml$taxonomicCoverage(
    generalTaxonomicCoverage = "Fishes",
    taxonomicClassification = list(
      eml$taxonomicClassification(
        taxonRankName = "Superclass",
        taxonRankValue = "Agnatha"),
      eml$taxonomicClassification(
        taxonRankName = "unranked",
        taxonRankValue = "Chondrichthyes"),
      eml$taxonomicClassification(
        taxonRankName = "unranked",
        taxonRankValue = "Osteichthyes")
      )
    
#  ),
#  temporalCoverage = eml$temporalCoverage(
#    rangeOfDates = eml$rangeOfDates(
#      beginDate = eml$beginDate(
#        calendarDate = "2019-05-01"
#      ),
#      endDate = eml$endDate(
#        calendarDate = "2016-05-06"
#      )
#    )
   )
)

Extra MetaData

These fields are not required, though they make the metadata more complete.

methods <- eml$methods(
  methodStep = eml$methodStep(
    description = eml$description(
      para = paste("See Github <a href=\"https://github.com/iobis/mwhs-data-mobilization\">Project</a> and <a href=\"https://iobis.github.io/mwhs-data-mobilization/notebooks/", site_dir_name, "/", dataset_dir_name, "\"> R Notebook</a> for dataset construction methods", sep="")
    )
  )
)

#Other Data
pubDate <- "2023-10-15"

#language of original document
language <- "eng"

keywordSet <- eml$keywordSet(
  keyword = "Occurrence",
  keywordThesaurus = "GBIF Dataset Type Vocabulary: http://rs.gbif.org/vocabulary/gbif/dataset_type_2015-07-10.xml"
)

maintenance <- eml$maintenance(
  description = eml$description(
    para = ""),
  maintenanceUpdateFrequency = "notPlanned"
)

#Universal CC
intellectualRights <- eml$intellectualRights(
  para = "To the extent possible under law, the publisher has waived all rights to these data and has dedicated them to the <ulink url=\"http://creativecommons.org/publicdomain/zero/1.0/legalcode\"><citetitle>Public Domain (CC0 1.0)</citetitle></ulink>. Users may copy, modify, distribute and use the work, including for commercial purposes, without restriction."
)


purpose <- eml$purpose(
  para = "These data were made accessible through UNESCO's eDNA Expeditions project to mobilize available marine species and occurrence datasets from World Heritage Sites."
)

additionalInfo <- eml$additionalInfo(
  para = "marine, harvested by iOBIS"
)

Create and Validate EML

#Put it all together
my_eml <- eml$eml(
           packageId = paste("https://ipt.obis.org/secretariat/resource?id=", short_name, "/v1.0", sep = ""),  
           system = "http://gbif.org",
           scope = "system",
           dataset = eml$dataset(
               alternateIdentifier = alternateIdentifier,
               title = title,
               creator = creator,
               metadataProvider = metadataProvider,
               associatedParty = associatedParty,
               pubDate = pubDate,
               coverage = coverage,
               language = language,
               abstract = abstract,
               keywordSet = keywordSet,
               contact = contact,
               methods = methods,
               intellectualRights = intellectualRights,
               purpose = purpose,
               maintenance = maintenance,
               additionalInfo = additionalInfo),
           additionalMetadata = additionalMetadata
)

eml_validate(my_eml)
## [1] TRUE
## attr(,"errors")
## character(0)

Create meta.xml file

This is a file which describes the archive and data file structure and is required in a DarwinCore-Archive. It is based on the template file “meta_occurrence_checklist_template.xml”.

meta_template <- paste(path_to_project_root, "scripts_data/meta_occurrence_checklist_template.xml", sep="/")
meta <- read_xml(meta_template)

fields <- xml_find_all(meta, "//d1:field")

for (field in fields) {
  term <- xml_attr(field, "term")
  if (term == "http://rs.tdwg.org/dwc/terms/eventDate") {
    xml_set_attr(field, "default", eventDate)
  } else if (term == "http://rs.tdwg.org/dwc/terms/country") {
    xml_set_attr(field, "default", country)
  } else if (term == "http://rs.tdwg.org/dwc/terms/locality") {
    xml_set_attr(field, "default", locality)
  } else if (term == "http://rs.tdwg.org/dwc/terms/decimalLatitude") {
    xml_set_attr(field, "default", localities$decimalLatitude)
  } else if (term == "http://rs.tdwg.org/dwc/terms/decimalLongitude") {
    xml_set_attr(field, "default", localities$decimalLongitude)
  } else if (term == "http://rs.tdwg.org/dwc/terms/coordinateUncertaintyInMeters") {
    xml_set_attr(field, "default", localities$coordinateUncertaintyInMeters)
  } else if (term == "http://rs.tdwg.org/dwc/terms/footprintWKT") {
    xml_set_attr(field, "default", wkt)
  } else if (term == "http://rs.tdwg.org/dwc/terms/geodeticDatum") {
    xml_set_attr(field, "default", geodeticDatum)
  } else if (term == "http://rs.tdwg.org/dwc/terms/occurrenceStatus") {
    xml_set_attr(field, "default", occurrenceStatus)
  } else if (term == "http://rs.tdwg.org/dwc/terms/basisOfRecord") {
    xml_set_attr(field, "default", basisOfRecord)
  }
}

#Add identificationQualifier
new_field <- xml_add_sibling(fields[[3]], "field")
xml_set_attr(new_field, "index", "3")
xml_set_attr(new_field, "term", "http://rs.tdwg.org/dwc/terms/identificationQualifier")

fields <- append(fields, list(new_field))

Save outputs

dwc_output_dir <- paste(path_to_project_root, "output", site_dir_name, dataset_dir_name, sep="/")

write.csv(occurrence, paste(dwc_output_dir, "/occurrence.csv", sep = ""), na = "", row.names=FALSE)
write_xml(meta, file = paste(dwc_output_dir, "/meta.xml", sep = ""))
write_eml(my_eml, paste(dwc_output_dir, "/eml.xml", sep = ""))

Edit EML

We have to further edit the EML file to conform to GBIF-specific requirements that cannot be included in the original EML construction. This includes changing the schemaLocation and rearranging the GBIF element, since the construction automatically arranges the children nodes in alphabetical order.

#edit the schemaLocation and rearrange gbif node for gbif specific eml file
eml_content <- read_xml(paste(dwc_output_dir, "/eml.xml", sep = ""))

#change schemaLocation attributes for GBIF
root_node <- xml_root(eml_content)
xml_set_attr(root_node, "xsi:schemaLocation", "https://eml.ecoinformatics.org/eml-2.1.1 http://rs.gbif.org/schema/eml-gbif-profile/1.2/eml.xsd")
xml_set_attr(root_node, "xmlns:dc", "http://purl.org/dc/terms/")
xml_set_attr(root_node, "xmlns:stmml", NULL)
xml_set_attr(root_node, "xml:lang", "eng")


#rearrange children nodes under the GBIF element
hierarchyLevel <- eml_content %>% xml_find_all(".//hierarchyLevel")
dateStamp <- eml_content %>% xml_find_all(".//dateStamp")
citation <- eml_content %>% xml_find_all("./additionalMetadata/metadata/gbif/citation")
bibcitation <- eml_content %>% xml_find_all("./additionalMetadata/metadata/gbif/bibliography/citation")
xml_set_attr(bibcitation, "identifier", citationdoi)

eml_content %>% xml_find_all(".//hierarchyLevel") %>% xml_remove()
eml_content %>% xml_find_all(".//dateStamp") %>% xml_remove()
eml_content %>% xml_find_all("./additionalMetadata/metadata/gbif/citation") %>% xml_remove()
eml_content %>% xml_find_all(".//gbif") %>% xml_add_child(citation, .where=0)
eml_content %>% xml_find_all(".//gbif") %>% xml_add_child(hierarchyLevel, .where=0)
eml_content %>% xml_find_all(".//gbif") %>% xml_add_child(dateStamp, .where=0)

write_xml(eml_content, paste(dwc_output_dir, "/eml.xml", sep = ""))

Zip files to DwC-A

output_zip <- paste(dwc_output_dir, "DwC-A.zip", sep="/")

if (file.exists(output_zip)) {
  unlink(output_zip)
}

file_paths <- list.files(dwc_output_dir, full.names = TRUE)
zip(zipfile = output_zip, files = file_paths, mode = "cherry-pick")

if (file.exists(output_zip)) {
  unlink(file_paths)
}
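
As an optional final check (zip_list() comes from the same zip package), the archive contents can be listed to confirm that occurrence.csv, meta.xml and eml.xml were included:

#Optional: list the files inside the DwC-A
zip::zip_list(output_zip)$filename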