Update README.md

README.md (CHANGED)
@@ -20,4 +20,91 @@ configs:
   data_files:
   - split: train
     path: data/train-*
+license: apache-2.0
+language:
+- es
+tags:
+- legal
+size_categories:
+- n<1K
 ---
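The rest of the diff appends the script that generated the dataset (exported from Colab): it downloads the consolidated text of the Spanish Constitution (BOE-A-1978-31229) from the BOE XML API, splits it into overlapping chunks with LangChain, and pushes the result to the Hugging Face Hub.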
```python
# -*- coding: utf-8 -*-
"""
Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1iAhLoc8FxHXijhyljdKhrIJbn342bhPD
"""

# Commented out IPython magic to ensure Python compatibility.
# beautifulsoup4 and lxml ship with Colab; install them too when running elsewhere.
# %pip install --upgrade langchain langchain-community datasets beautifulsoup4 lxml

import requests
from bs4 import BeautifulSoup

CONFIG = {
    'title': 'Constitución Española',
    'url': "https://www.boe.es/diario_boe/xml.php?id=BOE-A-1978-31229",
    'chunk_size': 1300,    # target characters per chunk
    'chunk_overlap': 150,  # characters repeated between consecutive chunks
}

"""# Downloading the BOE document"""

response = requests.get(CONFIG['url'])
response.raise_for_status()
soup = BeautifulSoup(response.text, "lxml")

filename = "constitucion.txt"
with open(filename, 'w', encoding='utf-8') as fn:
    text = soup.select_one("documento > texto").get_text()
    fn.write(text)
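
# Note: the BOE endpoint returns the document as XML; the CSS selector
# "documento > texto" matches the <texto> element that wraps the full
# articulated text of the law.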

"""# Splitting the document into chunks"""

from langchain_community.document_loaders import TextLoader

loader = TextLoader(filename, encoding='utf-8')
document = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=CONFIG["chunk_size"],
    chunk_overlap=CONFIG["chunk_overlap"],
)
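
# Design note: the 150-character overlap repeats the tail of each chunk at
# the head of the next, so sentences cut at a boundary stay retrievable from
# either side. RecursiveCharacterTextSplitter also tries paragraph, line and
# word boundaries before falling back to splitting mid-word.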
docs_chunks = text_splitter.split_documents(document)

print(len(docs_chunks))

docs_chunks  # bare expression: in Colab this displays the chunk list
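
# Quick sanity check (illustrative; not in the original notebook): each chunk
# is a LangChain Document whose page_content should stay close to the
# 1300-character budget, with 150 characters shared between neighbours.
print(max(len(c.page_content) for c in docs_chunks))
print(docs_chunks[0].page_content[:80])  # first characters of the first chunk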

"""# Loading the chunks into a dataset"""

from datasets import Dataset

data_dict = {
    'id': [],
    'url': [],
    'title': [],
    'content': [],
}

for idx, chunk in enumerate(docs_chunks):
    data_dict['id'].append(idx)
    data_dict['url'].append(CONFIG['url'])
    data_dict['title'].append(CONFIG['title'])
    data_dict['content'].append(chunk.page_content)

dataset = Dataset.from_dict(data_dict)
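
# The resulting schema is flat: one row per chunk, with the source URL and
# title repeated on every row.
print(dataset)  # Dataset({features: ['id', 'url', 'title', 'content'], num_rows: ...})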

"""# Uploading to the Hugging Face Hub"""

# !huggingface-cli login
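
# Alternatively, authenticate programmatically (handy outside a notebook);
# huggingface_hub is installed as a dependency of datasets:
# from huggingface_hub import login
# login()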

dataset.push_to_hub("dariolopez/justicio-BOE-A-1978-31229-constitucion-100-chunks")
```
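
Once the push succeeds and the repository is public, the dataset can be loaded back with a single call; the `train` split matches the `data_files` config in the front matter above:

```python
from datasets import load_dataset

ds = load_dataset("dariolopez/justicio-BOE-A-1978-31229-constitucion-100-chunks", split="train")
print(ds)                      # features: id, url, title, content
print(ds[0]["title"])          # 'Constitución Española'
print(ds[0]["content"][:100])  # start of the first chunk
```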