Dataset Card for gbif-co-occurrence

Dataset Summary

  • Motivation: To create a spatiotemporally binned co-occurrence matrix of biological taxa from iNaturalist observation data (accessed through GBIF), suitable for machine learning tasks such as predicting species distributions or analyzing community patterns.

  • Source: GBIF.org (Global Biodiversity Information Facility) occurrence data, specifically from the dataset with download DOI: https://doi.org/10.15468/dl.wn7arn

  • Processing: The raw observation data was processed through several steps:

    • Loading the data stream from the provided URI.
    • Calculating a geohash (precision 3) for spatial binning from each observation's latitude and longitude.
    • Calculating a week_year label for temporal binning from the event date (a sketch of both binning steps follows this list).
    • Mapping taxonKey to a standardized hierarchical_code string based on taxonomic ranks (Kingdom, Phylum, Class, Order, Family, Genus) derived from a separate taxonomy dataset (taxonomy_dict.csv).
    • Aggregating observations by week-year and geohash bins to identify the unique hierarchical_codes present in each bin, forming the basis of the co-occurrence data.
    • The resulting co-occurrence data was saved as a dictionary of sets and then converted into a Hugging Face datasets object.
    • An intermediate column hierarchical_code_indices was created (mapping code strings to integer indices based on a vocabulary) but has since been removed from the final dataset for simplicity.
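
A minimal sketch of the two binning steps described above, assuming the pygeohash library and the Darwin Core field names (decimalLatitude, decimalLongitude, eventDate) used in GBIF simple TSV downloads; the exact week_year formatting is an assumption:

from datetime import datetime
import pygeohash as pgh  # assumed geohash library; any geohash encoder works

GEOHASH_PRECISION = 3  # precision used for this dataset

def bin_observation(record: dict) -> dict:
    """Attach week_year and geohash bins to a single GBIF occurrence record."""
    # Spatial bin: encode latitude/longitude to a 3-character geohash.
    geohash = pgh.encode(
        float(record["decimalLatitude"]),
        float(record["decimalLongitude"]),
        precision=GEOHASH_PRECISION,
    )
    # Temporal bin: ISO week of the event date, formatted as "<year>_<week>".
    event_date = datetime.fromisoformat(record["eventDate"][:10])
    week_year = f"{event_date.year}_{event_date.isocalendar()[1]}"
    return {"week_year": week_year, "geohash": geohash}

print(bin_observation({
    "decimalLatitude": "37.77",
    "decimalLongitude": "-122.42",
    "eventDate": "2023-09-06T10:15:00",
}))
# e.g. {'week_year': '2023_36', 'geohash': '9q8'}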

Supported Tasks and Leaderboards

  • Supported Tasks: This dataset is primarily designed for machine learning tasks related to species co-occurrence analysis, species distribution modeling, and community ecology pattern analysis. Potential tasks include:
    • Predicting the presence/absence of certain taxa based on the presence of others in a geo-temporal bin.
    • Clustering spatiotemporal bins based on their taxonomic composition.
    • Analyzing changes in community composition over time and space.
  • Leaderboards: Currently, there are no specific leaderboards associated with this dataset. Users are encouraged to use it for research and potentially establish benchmarks.

Dataset Structure

Data Fields

The dataset (cooccurrence_dataset) is a Hugging Face Dataset with the following columns:

  • week_year (string): A string representing the week and year of the observation bin (e.g., "2023_36").
  • geohash (string): A string representing the geohash of the observation bin (e.g., "dp3"). The precision of the geohash was set to 3 during processing.
  • hierarchical_codes (list of strings): A list of unique hierarchical taxonomic code strings observed within that specific week_year and geohash bin.
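
For example, a single record takes the following form (values illustrative, hierarchical_codes truncated):

{
  "week_year": "2023_36",
  "geohash": "dp3",
  "hierarchical_codes": ["K3P49C4O18F1764G50255", "K3P49C4O13F1641G279008", "..."]
}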

Hierarchical Code Scheme

The hierarchical_codes are structured strings that represent the taxonomic path of an observation from Kingdom down to Genus. This scheme was developed to provide a standardized, concise, and comparable representation of taxonomic identity for each observation, derived from the original GBIF taxonomic classification.

Each code string is constructed by concatenating prefixes followed by numerical identifiers for each taxonomic rank present, in the order: K (Kingdom), P (Phylum), C (Class), O (Order), F (Family), G (Genus).

The numerical identifier following each prefix corresponds to a unique integer assigned to each distinct taxonomic name within that specific rank found across the dataset's taxonomy. This mapping from taxonomic name to numerical identifier was generated based on the unique entries in the taxonomy_dict.csv file.

For example, a code like K3P49C4O13F1641G280194 indicates:

  • K3: The Kingdom name assigned identifier 3 in the Kingdom-rank vocabulary (e.g., 'Animalia').
  • P49: The Phylum name assigned identifier 49 (e.g., 'Arthropoda').
  • C4: The Class name assigned identifier 4 (e.g., 'Insecta').
  • ...and so on for Order, Family, and Genus.

A code like K2P32C1O0F0G0 would represent an observation classified only to the Class level within that Kingdom and Phylum, with Order, Family, and Genus information missing.

To translate these full hierarchical code strings back into human-readable taxonomic rank names, please refer to the hierarchical_code_to_ranks.json file provided in the repository.
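
For quick inspection, here is a minimal sketch of splitting a code string into its per-rank numerical identifiers; mapping those identifiers back to names still requires hierarchical_code_to_ranks.json:

import re

def parse_hierarchical_code(code: str) -> dict:
    """Split a code like 'K3P49C4O13F1641G280194' into {'K': 3, 'P': 49, ...}."""
    # Each rank prefix (K, P, C, O, F, G) is followed by an integer identifier.
    return {prefix: int(value) for prefix, value in re.findall(r"([KPCOFG])(\d+)", code)}

print(parse_hierarchical_code("K3P49C4O13F1641G280194"))
# {'K': 3, 'P': 49, 'C': 4, 'O': 13, 'F': 1641, 'G': 280194}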

Accompanying Files

In addition to the main dataset, the repository includes the following important files:

  • vocabulary.json: A JSON file containing an ordered list of all unique hierarchical taxonomic code strings found across the entire dataset. This list serves as the vocabulary for the hierarchical codes. While the hierarchical_code_indices column has been removed from the dataset, this vocabulary can still be useful if users reconstruct an index-based representation or need the complete list of unique codes.
  • hierarchical_code_to_ranks.json: A JSON file containing a dictionary that maps each unique hierarchical taxonomic code string (e.g., "K3P49C4O13F1641G280194") to its corresponding taxonomic ranks (Kingdom, Phylum, Class, Order, Family, Genus). This file is essential for interpreting the hierarchical code strings found in the dataset.

Dataset Creation

Source Data

The dataset is derived from a specific GBIF occurrence data download:

  • DOI: https://doi.org/10.15468/dl.wn7arn
  • Creation date: 14:16:47 14 August 2025
  • Records included: 52,026,569 records from 1 published dataset
  • Compressed data size: 8.2 GB
  • Download format: simple tab-separated values (TSV)
  • Filter used:

{ "and" : [ "BasisOfRecord is Human Observation", "Country is United States of America", "DatasetKey is iNaturalist Research-grade Observations", "OccurrenceStatus is Present" ] }

Data Processing

The dataset was created by processing the raw GBIF occurrence stream. Key steps involved:

  1. Loading data using the datasets library in streaming mode.
  2. Calculating spatiotemporal bins (week_year and geohash) for each observation.
  3. Mapping taxon IDs to standardized hierarchical code strings using a separate taxonomy source.
  4. Aggregating observations into spatiotemporal bins and collecting the unique hierarchical codes present in each bin.
  5. Structuring the aggregated data into a Hugging Face Dataset.

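A minimal sketch of steps 4 and 5 (aggregation and conversion), assuming per-observation records that already carry week_year, geohash, and hierarchical_code fields (field names illustrative):

from collections import defaultdict
from datasets import Dataset

def build_cooccurrence_dataset(binned_records) -> Dataset:
    """Aggregate per-observation records into one row per (week_year, geohash) bin."""
    # Collect the set of unique hierarchical codes seen in each spatiotemporal bin.
    bins = defaultdict(set)
    for rec in binned_records:
        bins[(rec["week_year"], rec["geohash"])].add(rec["hierarchical_code"])

    # Convert the dictionary of sets into a Hugging Face Dataset with one row per bin.
    rows = [
        {"week_year": wy, "geohash": gh, "hierarchical_codes": sorted(codes)}
        for (wy, gh), codes in bins.items()
    ]
    return Dataset.from_list(rows)

The actual pipeline processed the raw GBIF download in streaming mode (step 1), so the aggregation was incremental, but the grouping idea is the same as above.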

Code Interpretation

The hierarchical_codes column contains structured strings representing the taxonomic hierarchy. To translate these codes into human-readable taxonomic names, use the hierarchical_code_to_ranks.json file.

Loading and Using the Taxonomic Ranks Mapping

You can load and use the hierarchical_code_to_ranks.json file with the huggingface_hub library and Python's json module:

import json
from huggingface_hub import hf_hub_download

repo_id = "nppiech/gbif-co-occurrence"  # Replace with your actual repo ID if different
ranks_filename = "hierarchical_code_to_ranks.json"

try:
    # Download the mapping file from the dataset repository
    ranks_path = hf_hub_download(repo_id=repo_id, filename=ranks_filename, repo_type="dataset")
    with open(ranks_path, 'r', encoding='utf-8') as f:
        code_to_ranks = json.load(f)
    print(f"Loaded taxonomic rank mappings for {len(code_to_ranks)} hierarchical codes.")

    # Look up the ranks (Kingdom ... Genus) for one hierarchical code (illustrative key)
    example_code = "K3P49C4O13F1641G280194"
    print(code_to_ranks.get(example_code))

except Exception as e:
    print(f"An error occurred while loading the ranks mapping: {e}")
    print(f"Please ensure the repository ID '{repo_id}' is correct.")

Usage and Splitting Strategies

The cooccurrence_dataset provides the core spatiotemporal co-occurrence data, with each entry representing a unique geo-temporal bin and the list of hierarchical taxonomic codes observed within it.

For machine learning tasks, you will typically need to convert the list of hierarchical_codes into a numerical representation, such as a sparse matrix or tensor. You can use the hierarchical_code_to_ranks.json file to interpret the meaning of these codes.

Load the Dataset

You can load the dataset using the datasets library:

from datasets import load_dataset

repo_id = "nppiech/gbif-co-occurrence"  # Replace with your actual repo ID if different

try:
    cooccurrence_dataset = load_dataset(repo_id, split='train')  # Assuming the data is in the 'train' split
    print("Dataset loaded successfully.")
    print(cooccurrence_dataset)
except Exception as e:
    print(f"An error occurred while loading the dataset: {e}")
    print(f"Please ensure the repository ID '{repo_id}' is correct.")

Build a Sparse Co-occurrence Matrix

To use the co-occurrence data as model input, you can build a sparse presence/absence matrix (bins x hierarchical codes) with the vocabulary.json file:

import json
from huggingface_hub import hf_hub_download
from scipy.sparse import csr_matrix

# Download and load the vocabulary file
vocabulary_path = hf_hub_download(repo_id=repo_id, filename="vocabulary.json", repo_type="dataset")
with open(vocabulary_path, 'r', encoding='utf-8') as f:
    vocabulary = json.load(f)

print(f"Successfully loaded vocabulary from {vocabulary_path}. Size: {len(vocabulary)}")

# Create a mapping from hierarchical code string to its index in the vocabulary
code_to_index = {code: index for index, code in enumerate(vocabulary)}

# Prepare data for sparse matrix construction (one row per week_year/geohash bin)
data = []
row_indices = []
col_indices = []

print("Preparing data for sparse matrix creation...")
for row_idx, record in enumerate(cooccurrence_dataset):
    for code in record.get('hierarchical_codes', []):
        code_index = code_to_index.get(code)
        if code_index is not None:  # Ensure the code is in the vocabulary
            data.append(1)  # Indicate presence
            row_indices.append(row_idx)
            col_indices.append(code_index)

# Create the CSR matrix: rows are spatiotemporal bins, columns are hierarchical codes
cooccurrence_sparse_matrix = csr_matrix(
    (data, (row_indices, col_indices)),
    shape=(len(cooccurrence_dataset), len(vocabulary)),
)

print("CSR sparse matrix created successfully.")
print(f"Shape of the sparse matrix: {cooccurrence_sparse_matrix.shape}")
# Note: This matrix can now be used as input features for ML models

Split the Dataset

from sklearn.model_selection import train_test_split
import numpy as np

# Assuming cooccurrence_dataset is loaded and your features (X) are prepared,
# for example the sparse matrix built above
X = cooccurrence_sparse_matrix

# Labels might be derived from the data or an external source (e.g., environmental variables);
# for this example, use a dummy binary label array
y = np.random.randint(0, 2, size=len(cooccurrence_dataset))

# Example random split (80% train, 10% validation, 10% test)
# First split: train_val vs test
X_train_val, X_test, y_train_val, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42
)

# Second split: train vs validation (adjust test_size for the reduced pool)
X_train, X_val, y_train, y_val = train_test_split(
    X_train_val, y_train_val, test_size=0.1 / (1 - 0.1), random_state=42
)

print(f"Original dataset size: {len(cooccurrence_dataset)}")
print(f"Train set size: {X_train.shape[0]}")
print(f"Validation set size: {X_val.shape[0]}")
print(f"Test set size: {X_test.shape[0]}")

Additional Information

Dataset Curators

Licensing Information

Citation Information

If you use this dataset, please cite both the processed dataset (this Hugging Face dataset) and the original source data from GBIF.

To cite this dataset (the processed co-occurrence data):

https://huggingface.co/datasets/nppiech/gbif-co-occurrence, accessed (date)

To cite the original GBIF source data:

GBIF.org (14 August 2025) GBIF Occurrence Download https://doi.org/10.15468/dl.wn7arn

Contributions
