import os
import torch
import random
import argparse
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader, Dataset


def makedir(path):
    # Create the directory if it does not already exist.
    is_exist = os.path.exists(path)
    if is_exist:
        return '%s already exists!' % path
    else:
        os.makedirs(path)


def setup_seed(seed):
    # Seed all random number generators and make cuDNN deterministic for reproducibility.
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)


class OrderNamespace(argparse.Namespace):
    """argparse Namespace that remembers the order in which attributes were set."""

    def __init__(self, **kwargs):
        self.__dict__['order'] = []
        super(OrderNamespace, self).__init__(**kwargs)

    def __setattr__(self, attr, value):
        # Without this check, str-type args would be printed twice in the log,
        # presumably because the parent class calls __setattr__ again for str values.
        if attr not in self.__dict__['order']:
            self.__dict__['order'].append(attr)
        super(OrderNamespace, self).__setattr__(attr, value)


def fix_seed(i):
    # Seed Python, NumPy and PyTorch (including all visible GPUs) with the same value.
    random.seed(i)
    np.random.seed(i)
    torch.manual_seed(i)
    if torch.cuda.device_count() > 0:
        torch.cuda.manual_seed_all(i)


class MyDataset(Dataset):
    def __init__(self, tokenizer, texts, labels, label2idx, maxlen):
        self.tokenizer = tokenizer
        # No padding here; a data collator applies dynamic padding later.
        texts = [t if (t is not None and str(t) != 'nan') else '' for t in texts]
        self.encodings = tokenizer(texts, truncation=True, max_length=maxlen)
        self.labels = labels
        self.label2idx = label2idx

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        # The 'labels' field must hold the label index, not the label string.
        item['labels'] = torch.tensor(self.label2idx[self.labels[idx]])
        return item

    def __len__(self):
        return len(self.labels)


def get_dataloader(file_path, tokenizer, label2idx, maxlen, bsz, collate_fn, shuffle=True):
    # Read a csv file with 'content' and 'label' columns and return a DataLoader.
    df = pd.read_csv(file_path)
    texts, labels = list(df['content']), list(df['label'])
    dataset = MyDataset(tokenizer, texts, labels, label2idx, maxlen)
    dataloader = DataLoader(dataset, batch_size=bsz, collate_fn=collate_fn, shuffle=shuffle)
    return dataloader


def get_dataloader_from_list(texts, labels, tokenizer, label2idx, maxlen, bsz, collate_fn, shuffle=True):
    # Same as get_dataloader, but takes texts and labels directly instead of a csv path.
    dataset = MyDataset(tokenizer, texts, labels, label2idx, maxlen)
    dataloader = DataLoader(dataset, batch_size=bsz, collate_fn=collate_fn, shuffle=shuffle)
    return dataloader
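

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of wiring these helpers together. It assumes a Hugging Face
# tokenizer and a local 'train.csv' with 'content' and 'label' columns; the
# checkpoint name, file path, and label set below are hypothetical placeholders.
if __name__ == '__main__':
    from transformers import AutoTokenizer, DataCollatorWithPadding

    setup_seed(42)
    tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese')  # assumed checkpoint
    collator = DataCollatorWithPadding(tokenizer)                   # dynamic padding per batch
    label2idx = {'negative': 0, 'positive': 1}                      # hypothetical label set
    loader = get_dataloader('train.csv', tokenizer, label2idx,
                            maxlen=128, bsz=32, collate_fn=collator)
    for batch in loader:
        print(batch['input_ids'].shape, batch['labels'].shape)
        break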