"""using_dataset_hugginface.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""

"""**Hugging Face login to push to the Hub**"""

import os
from functools import reduce
from pathlib import Path

import pandas as pd

from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from transformers import AutoTokenizer

HF_TOKEN = ''  # write-enabled Hugging Face access token (left blank here)
DATASET_TO_LOAD = 'bigbio/distemist'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
DATASET_SOURCE_ID = '11'  # value stored in the 'source' column of every row
BASE_DIR = "BARR2" + os.sep + "txt"  # folder holding the raw .txt documents

login(token=HF_TOKEN)
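# (Interactive alternative: run `huggingface-cli login` once instead of
# passing the token programmatically.)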

# dataset_CODING and royalListOfCode come from the original notebook and are
# not used in the steps below; the tokenizer is used only to count tokens.
dataset_CODING = load_dataset(DATASET_TO_LOAD)
royalListOfCode = {}
issues_path = 'dataset'  # local output folder for the JSONL export
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")

path = Path(__file__).parent.absolute()
MAIN_FILE_ADRESS = str(path) + os.sep + BASE_DIR

# Every .txt file sitting directly under BARR2/txt
files = [
    os.path.join(MAIN_FILE_ADRESS, f)
    for f in os.listdir(MAIN_FILE_ADRESS)
    if os.path.isfile(os.path.join(MAIN_FILE_ADRESS, f)) and Path(f).suffix == ".txt"
]

# Row template: one dict per document. The field names (including the
# 'speciallity' spelling) follow the target dataset's schema.
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': '',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
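# A populated row looks roughly like this (values are illustrative only):
# {'raw_text': 'Paciente de 45 años...', 'topic': '', 'speciallity': '',
#  'raw_text_type': 'clinic_case', 'topic_type': '', 'source': '11',
#  'country': 'es', 'document_id': '1'}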

# Corpus-level counters
totalOfTokens = 0             # total tokens across all documents
corpusToLoad = []             # rows to export
countCopySeveralDocument = 0  # duplicated documents found (stays 0 here)
counteOriginalDocument = 0    # documents read; doubles as the document_id

# Build one corpus row per text file
for iFile in files:
    with open(iFile, encoding='utf8') as file:
        linesInFile = file.readlines()
        text = reduce(lambda a, b: a + " " + b, linesInFile, "")

    counteOriginalDocument += 1

    # Tokenize only to keep corpus statistics
    listOfTokens = tokenizer.tokenize(text)
    currentSizeOfTokens = len(listOfTokens)
    totalOfTokens += currentSizeOfTokens

    newCorpusRow = cantemistDstDict.copy()
    newCorpusRow['raw_text'] = text
    newCorpusRow['document_id'] = str(counteOriginalDocument)
    corpusToLoad.append(newCorpusRow)

df = pd.DataFrame.from_records(corpusToLoad)

# Write the corpus to JSONL, making sure the output folder exists and
# replacing any previous export.
os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Exported the corpus! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)

print('Documents in the corpus:', counteOriginalDocument)
print('Duplicated documents in the corpus:', countCopySeveralDocument)
print('Total tokens in the corpus:', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KB):', size >> 10)
print('File size in megabytes (MB):', size >> 20)
print('File size in gigabytes (GB):', size >> 30)
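# (Right-shifting the byte count by 10/20/30 bits divides by 1024, 1024**2,
# and 1024**3, so these are truncated binary KiB/MiB/GiB figures.)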

# Reload the JSONL export as a datasets.Dataset for the Hub update
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")

# Append to the existing Hub dataset when it already exists; otherwise
# (e.g. on the very first upload) start from the local corpus alone.
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    print("=== Before ====")
    print(spanish_dataset)
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    spanish_dataset = local_spanish_dataset

spanish_dataset.push_to_hub(DATASET_TO_UPDATE)

print("=== After ====")
print(spanish_dataset)
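
# Optional sanity check (a sketch; assumes the push completed and the token
# grants read access to the updated dataset):
# updated = load_dataset(DATASET_TO_UPDATE, split="train")
# print(updated)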