This repository has been archived by the owner on Feb 12, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 4
/
splitFieldIntoMultipleFields.py
120 lines (110 loc) · 5.38 KB
/
splitFieldIntoMultipleFields.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# -*- coding: utf-8 -*-
import json
import requests
import secrets
import csv
import time
import urllib3
from datetime import datetime
import ast
import argparse
# Prompt for an alternate secrets module; entering a name switches the script
# from the default (stage) configuration to production.
secretsVersion = input('To edit production server, enter the name of the secrets file: ')
if secretsVersion != '':
    try:
        # Rebind `secrets` from the module imported at the top of the file
        # to the named production config.
        secrets = __import__(secretsVersion)
        print('Editing Production')
    except ImportError:
        # Import failed: keep using the stage `secrets` module already imported.
        print('Editing Stage')
# Connection settings pulled from whichever secrets module is now bound.
baseURL = secrets.baseURL
email = secrets.email
password = secrets.password
filePath = secrets.filePath
verify = secrets.verify  # SSL-verification flag passed to every request below
skippedCollections = secrets.skippedCollections  # NOTE(review): never read in the visible code
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--replacedKey', help='the key to be replaced. optional - if not provided, the script will ask for input')
parser.add_argument('-f', '--fileName', help='the CSV file of changes. optional - if not provided, the script will ask for input')
args = parser.parse_args()
# Fall back to interactive prompts for any argument not given on the CLI.
if args.replacedKey:
    replacedKey = args.replacedKey
else:
    replacedKey = input('Enter the key to be replaced: ')
if args.fileName:
    fileName = filePath+args.fileName
else:
    fileName = filePath+input('Enter the file name of the CSV of changes (including \'.csv\'): ')
# Authenticate against the REST API and open the change-log CSV.
# Suppress the warning triggered when verify=False is used against a
# self-signed certificate.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
startTime = time.time()
data = {'email':email,'password':password}
header = {'content-type':'application/json','accept':'application/json'}
# Log in and capture the session cookie used by every subsequent request.
# NOTE(review): credentials travel as query parameters (params=data), which can
# leak into server logs — presumably what this REST API expects; confirm
# before changing.
session = requests.post(baseURL+'/rest/login', headers=header, verify=verify, params=data).cookies['JSESSIONID']
cookies = {'JSESSIONID': session}
headerFileUpload = {'accept':'application/json'}
# Verify the session is live before making any edits.
status = requests.get(baseURL+'/rest/status', headers=header, cookies=cookies, verify=verify).json()
userFullName = status['fullname']
print('authenticated')
recordsEdited = 0
elementsEdited = 0
# Timestamped log of every change. newline='' is required by the csv module;
# without it the original open() produced blank interleaved rows on Windows.
f = csv.writer(open(filePath+'splitFieldIntoMultipleFields'+datetime.now().strftime('%Y-%m-%d %H.%M.%S')+'.csv', 'w', newline=''))
f.writerow(['itemID']+['replacedKey']+['replacementValueList']+['delete']+['post'])
# NOTE(review): replacedElement is assigned but never read in the visible code.
replacedElement = ''
# For each row of the change CSV: find every item whose `replacedKey` metadata
# equals the row's `value`, then replace that single element with one element
# per entry in the row's `structuredList` (plus a provenance note for each).
with open(fileName) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        replacedValue = row['value']
        print(replacedValue)
        # `structuredList` holds a Python-literal list, e.g. "['a', 'b']".
        replacementValueList = ast.literal_eval(row['structuredList'])
        # Page through /rest/filtered-items 200 at a time, collecting the
        # link of every matching item. `items` starts as a non-empty sentinel
        # so the loop runs at least once, and stops on the first empty page.
        offset = 0
        items = ''
        itemLinks = []
        while items != []:
            # Let requests build the query string so replacedKey/replacedValue
            # are percent-encoded (the previous hand-concatenated URL broke on
            # values containing spaces or reserved characters).
            queryParams = {
                'query_field[]': replacedKey,
                'query_op[]': 'equals',
                'query_val[]': replacedValue,
                'limit': 200,
                'offset': offset,
            }
            response = requests.get(baseURL+'/rest/filtered-items', params=queryParams, headers=header, cookies=cookies, verify=verify).json()
            items = response['items']
            for item in items:
                itemLinks.append(item['link'])
            offset = offset + 200
            print(offset)
        for itemLink in itemLinks:
            itemMetadataProcessed = []
            print(itemLink)
            metadata = requests.get(baseURL + itemLink + '/metadata', headers=header, cookies=cookies, verify=verify).json()
            for l in range(0, len(metadata)):
                # Strip fields that must not be sent back on update.
                metadata[l].pop('schema', None)
                metadata[l].pop('element', None)
                metadata[l].pop('qualifier', None)
                languageValue = metadata[l]['language']
                if metadata[l]['key'] == replacedKey and metadata[l]['value'] == replacedValue:
                    print('match')
                    replacedElement = metadata[l]
                    # Expand the matched element into one element per
                    # replacement value, each paired with a provenance note.
                    for replacementValue in replacementValueList:
                        updatedMetadataElement = {}
                        updatedMetadataElement['key'] = replacedKey
                        updatedMetadataElement['value'] = replacementValue
                        updatedMetadataElement['language'] = languageValue
                        itemMetadataProcessed.append(updatedMetadataElement)
                        provNote = '\''+replacedKey+': '+replacedValue+'\' split into \''+replacedKey+': '+replacementValue+'\' through a batch process on '+datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'.'
                        provNoteElement = {}
                        provNoteElement['key'] = 'dc.description.provenance'
                        provNoteElement['value'] = provNote
                        provNoteElement['language'] = 'en_US'
                        itemMetadataProcessed.append(provNoteElement)
                        elementsEdited = elementsEdited + 1
                else:
                    # Keep every non-matching element, skipping exact duplicates.
                    if metadata[l] not in itemMetadataProcessed:
                        itemMetadataProcessed.append(metadata[l])
            recordsEdited = recordsEdited + 1
            itemMetadataProcessed = json.dumps(itemMetadataProcessed)
            print('updated', itemLink, recordsEdited, elementsEdited)
            # Replace the item's metadata wholesale: delete everything, then
            # PUT the rebuilt element list.
            delete = requests.delete(baseURL + itemLink + '/metadata', headers=header, cookies=cookies, verify=verify)
            print(delete)
            post = requests.put(baseURL + itemLink + '/metadata', headers=header, cookies=cookies, verify=verify, data=itemMetadataProcessed)
            print(post)
            f.writerow([itemLink]+[replacedKey]+[replacementValueList]+[delete]+[post])
# End the REST session and report how long the whole run took.
logout = requests.post(baseURL+'/rest/logout', headers=header, cookies=cookies, verify=verify)
totalSeconds = time.time() - startTime
hours, remainder = divmod(totalSeconds, 3600)
minutes, seconds = divmod(remainder, 60)
print('Total script run time: ', '%d:%02d:%02d' % (hours, minutes, seconds))