Upload segment_notebooks.py
segment_notebooks.py  +57 -0
segment_notebooks.py
ADDED
@@ -0,0 +1,57 @@
import json
import itertools
from datasets import load_dataset


def segment_cells(content):

    # segment a notebook into lists of individual cells and their types
    cells = []
    cell_types = []
    for cell in content['cells']:
        # select only non-empty cells
        if len(cell['source']) != 0:
            cells.append(' '.join(cell['source']))
            cell_types.append(cell['cell_type'])

    return cells, cell_types

def parse_notebook(batch):
    try:
        cells, types = segment_cells(json.loads(batch['content']))

        # get the cell types and group consecutive cells of the same type into lists
        cell_type_groups = [list(g) for k, g in itertools.groupby(types)]
        cell_types = [k for k, g in itertools.groupby(types)]
        cell_groups = []

        group_start = 0
        for g in cell_type_groups:
            cell_groups.append(cells[group_start:group_start + len(g)])
            group_start += len(g)

        batch['cells'] = cell_groups
        batch['cell_types'] = cell_types
        batch['cell_type_groups'] = cell_type_groups

    except Exception:
        # if json.loads() raises an error, skip the notebook and add a placeholder
        batch['cells'] = [['empty']]
        batch['cell_types'] = ['empty']
        batch['cell_type_groups'] = [['empty']]

    del batch['content']
    return batch
+
|
| 47 |
+
|
| 48 |
+
if __name__ == "__main__":
|
| 49 |
+
|
| 50 |
+
# load dataset
|
| 51 |
+
dataset = load_dataset("bigcode/the-stack",data_dir="data/jupyter-notebook", split="train",use_auth_token=True)
|
| 52 |
+
# segment notebooks
|
| 53 |
+
dataset = dataset.map(segment)
|
| 54 |
+
# filter out erronous cells via placeholders
|
| 55 |
+
dataset = dataset.filter(lambda entry: entry['cell_types']!=['empty'])
|
| 56 |
+
# push to hub
|
| 57 |
+
dataset.push_to_hub("bigcode/jupyter-parsed")
|
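
For reference, a minimal sketch of how the groupby-based segmentation behaves on a toy notebook; the toy dict and the printed outputs below are illustrative assumptions, not part of the committed script, and it assumes the file above is importable as segment_notebooks.py:

import json
from segment_notebooks import parse_notebook  # assumes the script above is on the path

toy_notebook = {
    "cells": [
        {"cell_type": "markdown", "source": ["# Title"]},
        {"cell_type": "code", "source": ["x = 1\n", "print(x)"]},
        {"cell_type": "code", "source": ["x + 1"]},
        {"cell_type": "markdown", "source": ["Some text"]},
        {"cell_type": "code", "source": []},  # empty cell, dropped by segment_cells
    ]
}

batch = parse_notebook({"content": json.dumps(toy_notebook)})
print(batch["cell_types"])        # ['markdown', 'code', 'markdown']
print(batch["cell_type_groups"])  # [['markdown'], ['code', 'code'], ['markdown']]
print(batch["cells"])             # [['# Title'], ['x = 1\n print(x)', 'x + 1'], ['Some text']]

Consecutive cells of the same type are merged into one group, so downstream consumers see alternating runs of markdown and code rather than individual cells.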